Sync with head.
3e5a4e683HKVU-sxtagrDasRB8eBVw linux-2.4.28-xen-sparse/mm/swapfile.c
41180721bNns9Na7w1nJ0ZVt8bhUNA linux-2.4.28-xen-sparse/mm/vmalloc.c
41505c57WAd5l1rlfCLNSCpx9J13vA linux-2.4.28-xen-sparse/net/core/skbuff.c
-40f562372u3A7_kfbYYixPHJJxYUxA linux-2.6.9-xen-sparse/arch/xen/Kconfig
-40f56237utH41NPukqHksuNf29IC9A linux-2.6.9-xen-sparse/arch/xen/Kconfig.drivers
-40f56237penAAlWVBVDpeQZNFIg8CA linux-2.6.9-xen-sparse/arch/xen/Makefile
-40f56237JTc60m1FRlUxkUaGSQKrNw linux-2.6.9-xen-sparse/arch/xen/boot/Makefile
-40f56237hRxbacU_3PdoAl6DjZ3Jnw linux-2.6.9-xen-sparse/arch/xen/configs/xen0_defconfig
-40f56237wubfjJKlfIzZlI3ZM2VgGA linux-2.6.9-xen-sparse/arch/xen/configs/xenU_defconfig
-40f56237Mta0yHNaMS_qtM2rge0qYA linux-2.6.9-xen-sparse/arch/xen/i386/Kconfig
-40f56238u2CJdXNpjsZgHBxeVyY-2g linux-2.6.9-xen-sparse/arch/xen/i386/Makefile
-40f56238eczveJ86k_4hNxCLRQIF-g linux-2.6.9-xen-sparse/arch/xen/i386/kernel/Makefile
-40f56238rXVTJQKbBuXXLH52qEArcg linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/Makefile
-40f562385s4lr6Zg92gExe7UQ4A76Q linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/common.c
-41ab440bnpxZdWShZrGgM9pPaz5rmA linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/Makefile
-41ab440bBKWz-aEOEojU4PAMXe3Ppg linux-2.6.9-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/main.c
-40f56238XDtHSijkAFlbv1PT8Bhw_Q linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S
-40f56238bnvciAuyzAiMkdzGErYt1A linux-2.6.9-xen-sparse/arch/xen/i386/kernel/head.S
-40f58a0d31M2EkuPbG94ns_nOi0PVA linux-2.6.9-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c
-40faa751_zbZlAmLyQgCXdYekVFdWA linux-2.6.9-xen-sparse/arch/xen/i386/kernel/ioport.c
-40f562382aC3_Gt4RG-4ZsfvDRUg3Q linux-2.6.9-xen-sparse/arch/xen/i386/kernel/irq.c
-40f56238ue3YRsK52HG7iccNzP1AwQ linux-2.6.9-xen-sparse/arch/xen/i386/kernel/ldt.c
-4107adf1cNtsuOxOB4T6paAoY2R2PA linux-2.6.9-xen-sparse/arch/xen/i386/kernel/pci-dma.c
-40f56238a8iOVDEoostsbun_sy2i4g linux-2.6.9-xen-sparse/arch/xen/i386/kernel/process.c
-40f56238YQIJoYG2ehDGEcdTgLmGbg linux-2.6.9-xen-sparse/arch/xen/i386/kernel/setup.c
-40f56238nWMQg7CKbyTy0KJNvCzbtg linux-2.6.9-xen-sparse/arch/xen/i386/kernel/signal.c
-40f56238UL9uv78ODDzMwLL9yryeFw linux-2.6.9-xen-sparse/arch/xen/i386/kernel/sysenter.c
-40f56238qVGkpO_ycnQA8k03kQzAgA linux-2.6.9-xen-sparse/arch/xen/i386/kernel/time.c
-40f56238NzTgeO63RGoxHrW5NQeO3Q linux-2.6.9-xen-sparse/arch/xen/i386/kernel/timers/Makefile
-40f56238BMqG5PuSHufpjbvp_helBw linux-2.6.9-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c
-40f562389xNa78YBZciUibQjyRU_Lg linux-2.6.9-xen-sparse/arch/xen/i386/kernel/traps.c
-40f56238qASEI_IOhCKWNuwFKNZrKQ linux-2.6.9-xen-sparse/arch/xen/i386/kernel/vmlinux.lds.S
-40f56238JypKAUG01ZojFwH7qnZ5uA linux-2.6.9-xen-sparse/arch/xen/i386/kernel/vsyscall.S
-40f56238wi6AdNQjm0RT57bSkwb6hg linux-2.6.9-xen-sparse/arch/xen/i386/kernel/vsyscall.lds
-40f56238a3w6-byOzexIlMgni76Lcg linux-2.6.9-xen-sparse/arch/xen/i386/mm/Makefile
-40f56238ILx8xlbywNbzTdv5Zr4xXQ linux-2.6.9-xen-sparse/arch/xen/i386/mm/fault.c
-4118cc35CbY8rfGVspF5O-7EkXBEAA linux-2.6.9-xen-sparse/arch/xen/i386/mm/highmem.c
-40f562383SKvDStdtrvzr5fyCbW4rw linux-2.6.9-xen-sparse/arch/xen/i386/mm/hypervisor.c
-40f56239xcNylAxuGsQHwi1AyMLV8w linux-2.6.9-xen-sparse/arch/xen/i386/mm/init.c
-41062ab7CjxC1UBaFhOMWWdhHkIUyg linux-2.6.9-xen-sparse/arch/xen/i386/mm/ioremap.c
-413b5ab8LIowAnQrEmaOJSdmqm96jQ linux-2.6.9-xen-sparse/arch/xen/i386/mm/pageattr.c
-40f5623906UYHv1rsVUeRc0tFT0dWw linux-2.6.9-xen-sparse/arch/xen/i386/mm/pgtable.c
-4107adf12ndy94MidCaivDibJ3pPAg linux-2.6.9-xen-sparse/arch/xen/i386/pci/Makefile
-4107adf1WcCgkhsdLTRGX52cOG1vJg linux-2.6.9-xen-sparse/arch/xen/i386/pci/direct.c
-4107adf1s5u6249DNPUViX1YNagbUQ linux-2.6.9-xen-sparse/arch/xen/i386/pci/irq.c
-40f56239zOksGg_H4XD4ye6iZNtoZA linux-2.6.9-xen-sparse/arch/xen/kernel/Makefile
-40f56239bvOjuuuViZ0XMlNiREFC0A linux-2.6.9-xen-sparse/arch/xen/kernel/ctrl_if.c
+40f562372u3A7_kfbYYixPHJJxYUxA linux-2.6.10-rc2-xen-sparse/arch/xen/Kconfig
+40f56237utH41NPukqHksuNf29IC9A linux-2.6.10-rc2-xen-sparse/arch/xen/Kconfig.drivers
+40f56237penAAlWVBVDpeQZNFIg8CA linux-2.6.10-rc2-xen-sparse/arch/xen/Makefile
+40f56237JTc60m1FRlUxkUaGSQKrNw linux-2.6.10-rc2-xen-sparse/arch/xen/boot/Makefile
+40f56237hRxbacU_3PdoAl6DjZ3Jnw linux-2.6.10-rc2-xen-sparse/arch/xen/configs/xen0_defconfig
+40f56237wubfjJKlfIzZlI3ZM2VgGA linux-2.6.10-rc2-xen-sparse/arch/xen/configs/xenU_defconfig
+40f56237Mta0yHNaMS_qtM2rge0qYA linux-2.6.10-rc2-xen-sparse/arch/xen/i386/Kconfig
+40f56238u2CJdXNpjsZgHBxeVyY-2g linux-2.6.10-rc2-xen-sparse/arch/xen/i386/Makefile
+40f56238eczveJ86k_4hNxCLRQIF-g linux-2.6.10-rc2-xen-sparse/arch/xen/i386/kernel/Makefile
+40f56238rXVTJQKbBuXXLH52qEArcg linux-2.6.10-rc2-xen-sparse/arch/xen/i386/kernel/cpu/Makefile
+40f562385s4lr6Zg92gExe7UQ4A76Q linux-2.6.10-rc2-xen-sparse/arch/xen/i386/kernel/cpu/common.c
+41ab440bnpxZdWShZrGgM9pPaz5rmA linux-2.6.10-rc2-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/Makefile
+41ab440bBKWz-aEOEojU4PAMXe3Ppg linux-2.6.10-rc2-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/main.c
+40f56238XDtHSijkAFlbv1PT8Bhw_Q linux-2.6.10-rc2-xen-sparse/arch/xen/i386/kernel/entry.S
+40f56238bnvciAuyzAiMkdzGErYt1A linux-2.6.10-rc2-xen-sparse/arch/xen/i386/kernel/head.S
+40f58a0d31M2EkuPbG94ns_nOi0PVA linux-2.6.10-rc2-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c
+40faa751_zbZlAmLyQgCXdYekVFdWA linux-2.6.10-rc2-xen-sparse/arch/xen/i386/kernel/ioport.c
+40f56238ue3YRsK52HG7iccNzP1AwQ linux-2.6.10-rc2-xen-sparse/arch/xen/i386/kernel/ldt.c
+4107adf1cNtsuOxOB4T6paAoY2R2PA linux-2.6.10-rc2-xen-sparse/arch/xen/i386/kernel/pci-dma.c
+40f56238a8iOVDEoostsbun_sy2i4g linux-2.6.10-rc2-xen-sparse/arch/xen/i386/kernel/process.c
+40f56238YQIJoYG2ehDGEcdTgLmGbg linux-2.6.10-rc2-xen-sparse/arch/xen/i386/kernel/setup.c
+40f56238nWMQg7CKbyTy0KJNvCzbtg linux-2.6.10-rc2-xen-sparse/arch/xen/i386/kernel/signal.c
+40f56238qVGkpO_ycnQA8k03kQzAgA linux-2.6.10-rc2-xen-sparse/arch/xen/i386/kernel/time.c
+40f56238NzTgeO63RGoxHrW5NQeO3Q linux-2.6.10-rc2-xen-sparse/arch/xen/i386/kernel/timers/Makefile
+40f56238BMqG5PuSHufpjbvp_helBw linux-2.6.10-rc2-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c
+40f562389xNa78YBZciUibQjyRU_Lg linux-2.6.10-rc2-xen-sparse/arch/xen/i386/kernel/traps.c
+40f56238JypKAUG01ZojFwH7qnZ5uA linux-2.6.10-rc2-xen-sparse/arch/xen/i386/kernel/vsyscall.S
+40f56238wi6AdNQjm0RT57bSkwb6hg linux-2.6.10-rc2-xen-sparse/arch/xen/i386/kernel/vsyscall.lds
+40f56238a3w6-byOzexIlMgni76Lcg linux-2.6.10-rc2-xen-sparse/arch/xen/i386/mm/Makefile
+40f56238ILx8xlbywNbzTdv5Zr4xXQ linux-2.6.10-rc2-xen-sparse/arch/xen/i386/mm/fault.c
+4118cc35CbY8rfGVspF5O-7EkXBEAA linux-2.6.10-rc2-xen-sparse/arch/xen/i386/mm/highmem.c
+40f562383SKvDStdtrvzr5fyCbW4rw linux-2.6.10-rc2-xen-sparse/arch/xen/i386/mm/hypervisor.c
+40f56239xcNylAxuGsQHwi1AyMLV8w linux-2.6.10-rc2-xen-sparse/arch/xen/i386/mm/init.c
+41062ab7CjxC1UBaFhOMWWdhHkIUyg linux-2.6.10-rc2-xen-sparse/arch/xen/i386/mm/ioremap.c
+413b5ab8LIowAnQrEmaOJSdmqm96jQ linux-2.6.10-rc2-xen-sparse/arch/xen/i386/mm/pageattr.c
+40f5623906UYHv1rsVUeRc0tFT0dWw linux-2.6.10-rc2-xen-sparse/arch/xen/i386/mm/pgtable.c
+4107adf12ndy94MidCaivDibJ3pPAg linux-2.6.10-rc2-xen-sparse/arch/xen/i386/pci/Makefile
+4107adf1WcCgkhsdLTRGX52cOG1vJg linux-2.6.10-rc2-xen-sparse/arch/xen/i386/pci/direct.c
+4107adf1s5u6249DNPUViX1YNagbUQ linux-2.6.10-rc2-xen-sparse/arch/xen/i386/pci/irq.c
+40f56239zOksGg_H4XD4ye6iZNtoZA linux-2.6.10-rc2-xen-sparse/arch/xen/kernel/Makefile
+40f56239bvOjuuuViZ0XMlNiREFC0A linux-2.6.10-rc2-xen-sparse/arch/xen/kernel/ctrl_if.c
+40f56238xFQe9T7M_U_FItM-bZIpLw linux-2.6.10-rc2-xen-sparse/arch/xen/kernel/evtchn.c
+4110f478aeQWllIN7J4kouAHiAqrPw linux-2.6.10-rc2-xen-sparse/arch/xen/kernel/fixup.c
+412dfae9eA3_6e6bCGUtg1mj8b56fQ linux-2.6.10-rc2-xen-sparse/arch/xen/kernel/gnttab.c
+40f562392LBhwmOxVPsYdkYXMxI_ZQ linux-2.6.10-rc2-xen-sparse/arch/xen/kernel/reboot.c
+414c113396tK1HTVeUalm3u-1DF16g linux-2.6.10-rc2-xen-sparse/arch/xen/kernel/skbuff.c
+3f68905c5eiA-lBMQSvXLMWS1ikDEA linux-2.6.10-rc2-xen-sparse/arch/xen/kernel/xen_proc.c
+41261688yS8eAyy-7kzG4KBs0xbYCA linux-2.6.10-rc2-xen-sparse/drivers/Makefile
+4108f5c1WfTIrs0HZFeV39sttekCTw linux-2.6.10-rc2-xen-sparse/drivers/char/mem.c
+4111308bZAIzwf_Kzu6x1TZYZ3E0_Q linux-2.6.10-rc2-xen-sparse/drivers/char/tty_io.c
+40f56239Dp_vMTgz8TEbvo1hjHGc3w linux-2.6.10-rc2-xen-sparse/drivers/xen/Makefile
+41768fbcncpBQf8s2l2-CwoSNIZ9uA linux-2.6.10-rc2-xen-sparse/drivers/xen/balloon/Makefile
+3e6377f8i5e9eGz7Pw6fQuhuTQ7DQg linux-2.6.10-rc2-xen-sparse/drivers/xen/balloon/balloon.c
+410d0893otFGghmv4dUXDUBBdY5aIA linux-2.6.10-rc2-xen-sparse/drivers/xen/blkback/Makefile
+4087cf0d1XgMkooTZAiJS6NrcpLQNQ linux-2.6.10-rc2-xen-sparse/drivers/xen/blkback/blkback.c
+4087cf0dZadZ8r6CEt4fNN350Yle3A linux-2.6.10-rc2-xen-sparse/drivers/xen/blkback/common.h
+4087cf0dxlh29iw0w-9rxOCEGCjPcw linux-2.6.10-rc2-xen-sparse/drivers/xen/blkback/control.c
+4087cf0dbuoH20fMjNZjcgrRK-1msQ linux-2.6.10-rc2-xen-sparse/drivers/xen/blkback/interface.c
+4087cf0dk97tacDzxfByWV7JifUYqA linux-2.6.10-rc2-xen-sparse/drivers/xen/blkback/vbd.c
+40f56239Sfle6wGv5FS0wjS_HI150A linux-2.6.10-rc2-xen-sparse/drivers/xen/blkfront/Kconfig
+40f562395atl9x4suKGhPkjqLOXESg linux-2.6.10-rc2-xen-sparse/drivers/xen/blkfront/Makefile
+40f56239-JNIaTzlviVJohVdoYOUpw linux-2.6.10-rc2-xen-sparse/drivers/xen/blkfront/blkfront.c
+40f56239y9naBTXe40Pi2J_z3p-d1g linux-2.6.10-rc2-xen-sparse/drivers/xen/blkfront/block.h
+40f56239BVfPsXBiWQitXgDRtOsiqg linux-2.6.10-rc2-xen-sparse/drivers/xen/blkfront/vbd.c
+40f56239fsLjvtD8YBRAWphps4FDjg linux-2.6.10-rc2-xen-sparse/drivers/xen/console/Makefile
+3e5a4e651TH-SXHoufurnWjgl5bfOA linux-2.6.10-rc2-xen-sparse/drivers/xen/console/console.c
+40f56239KYxO0YabhPzCTeUuln-lnA linux-2.6.10-rc2-xen-sparse/drivers/xen/evtchn/Makefile
+40f56239DoibTX6R-ZYd3QTXAB8_TA linux-2.6.10-rc2-xen-sparse/drivers/xen/evtchn/evtchn.c
+410a9817HEVJvred5Oy_uKH3HFJC5Q linux-2.6.10-rc2-xen-sparse/drivers/xen/netback/Makefile
+4097ba831lpGeLlPg-bfV8XarVVuoQ linux-2.6.10-rc2-xen-sparse/drivers/xen/netback/common.h
+4097ba83wvv8yi5P5xugCUBAdb6O-A linux-2.6.10-rc2-xen-sparse/drivers/xen/netback/control.c
+4097ba83byY5bTSugJGZ1exTxIcMKw linux-2.6.10-rc2-xen-sparse/drivers/xen/netback/interface.c
+4087cf0dGmSbFhFZyIZBJzvqxY-qBw linux-2.6.10-rc2-xen-sparse/drivers/xen/netback/netback.c
+40f56239lrg_Ob0BJ8WBFS1zeg2CYw linux-2.6.10-rc2-xen-sparse/drivers/xen/netfront/Kconfig
+40f56239Wd4k_ycG_mFsSO1r5xKdtQ linux-2.6.10-rc2-xen-sparse/drivers/xen/netfront/Makefile
+405853f6nbeazrNyEWNHBuoSg2PiPA linux-2.6.10-rc2-xen-sparse/drivers/xen/netfront/netfront.c
+4108f5c1ppFXVpQzCOAZ6xXYubsjKA linux-2.6.10-rc2-xen-sparse/drivers/xen/privcmd/Makefile
+3e5a4e65IUfzzMu2kZFlGEB8-rpTaA linux-2.6.10-rc2-xen-sparse/drivers/xen/privcmd/privcmd.c
+412f47e4RKD-R5IS5gEXvcT8L4v8gA linux-2.6.10-rc2-xen-sparse/include/asm-generic/pgtable.h
+40f56239YAjS52QG2FIAQpHDZAdGHg linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/desc.h
+4107adf1E5O4ztGHNGMzCCNhcvqNow linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h
+40f5623akIoBsQ3KxSB2kufkbgONXQ linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/fixmap.h
+41979925z1MsKU1SfuuheM1IFDQ_bA linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/floppy.h
+4118b6a418gnL6AZsTdglC92YGqYTg linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/highmem.h
+40f5623aJVXQwpJMOLE99XgvGsfQ8Q linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/io.h
+40f5623aKXkBBxgpLx2NcvkncQ1Yyw linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h
+40f5623aDMCsWOFO0jktZ4e8sjwvEg linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h
+40f5623arsFXkGdPvIqvFi3yFXGR0Q linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h
+4120f807GCO0uqsLqdZj9csxR1Wthw linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/mmu_context.h
+40f5623aFTyFTR-vdiA-KaGxk5JOKQ linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/msr.h
+40f5623adgjZq9nAgCt0IXdWl7udSA linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/page.h
+40f5623a54NuG-7qHihGYmw4wWQnMA linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/param.h
+41137cc1kkvg0cg7uxddcEfjL7L67w linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/pci.h
+40f5623atCokYc2uCysSJ8jFO8TEsw linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/pgalloc.h
+412e01beTwiaC8sYY4XJP8PxLST5CA linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/pgtable-2level-defs.h
+40f5623aEToIXouJgO-ao5d5pcEt1w linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h
+40f5623aCCXRPlGpNthVXstGz9ZV3A linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/pgtable.h
+40f5623aPCkQQfPtJSooGdhcatrvnQ linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/processor.h
+412ea0afQL2CAI-f522TbLjLPMibPQ linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/ptrace.h
+40f5623bzLvxr7WoJIxVf2OH4rCBJg linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/segment.h
+40f5623bG_LzgG6-qwk292nTc5Wabw linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/setup.h
+40f5623bgzm_9vwxpzJswlAxg298Gg linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h
+40f5623bVdKP7Dt7qm8twu3NcnGNbA linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/system.h
+40f5623bc8LKPRO09wY5dGDnY_YCpw linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/tlbflush.h
+41062ab7uFxnCq-KtPeAm-aV8CicgA linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/vga.h
+40f5623bxUbeGjkRrjDguCy_Gm8RLw linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/xor.h
+40f5623bYNP7tHE2zX6YQxp9Zq2utQ linux-2.6.10-rc2-xen-sparse/include/asm-xen/ctrl_if.h
+40f5623b3Eqs8pAc5WpPX8_jTzV2qw linux-2.6.10-rc2-xen-sparse/include/asm-xen/evtchn.h
+419b4e9367PjTEvdjwavWN12BeBBXg linux-2.6.10-rc2-xen-sparse/include/asm-xen/foreign_page.h
+412dfaeazclyNDM0cpnp60Yo4xulpQ linux-2.6.10-rc2-xen-sparse/include/asm-xen/gnttab.h
+40f5623aGPlsm0u1LTO-NVZ6AGzNRQ linux-2.6.10-rc2-xen-sparse/include/asm-xen/hypervisor.h
+3f108af1ylCIm82H052FVTfXACBHrw linux-2.6.10-rc2-xen-sparse/include/asm-xen/linux-public/privcmd.h
+3fa8e3f0kBLeE4To2vpdi3cpJbIkbQ linux-2.6.10-rc2-xen-sparse/include/asm-xen/linux-public/suspend.h
+40f5623cndVUFlkxpf7Lfx7xu8madQ linux-2.6.10-rc2-xen-sparse/include/asm-xen/multicall.h
+4122466356eIBnC9ot44WSVVIFyhQA linux-2.6.10-rc2-xen-sparse/include/asm-xen/queues.h
+3f689063BoW-HWV3auUJ-OqXfcGArw linux-2.6.10-rc2-xen-sparse/include/asm-xen/xen_proc.h
+419b4e93z2S0gR17XTy8wg09JEwAhg linux-2.6.10-rc2-xen-sparse/include/linux/gfp.h
+419dfc609zbti8rqL60tL2dHXQ_rvQ linux-2.6.10-rc2-xen-sparse/include/linux/irq.h
+4124f66f4NaKNa0xPiGGykn9QaZk3w linux-2.6.10-rc2-xen-sparse/include/linux/skbuff.h
+419dfc6awx7w88wk6cG9P3mPidX6LQ linux-2.6.10-rc2-xen-sparse/kernel/irq/manage.c
+40f56a0ddHCSs3501MY4hRf22tctOw linux-2.6.10-rc2-xen-sparse/mkbuildtree
+412f46c0LJuKAgSPGoC0Z1DEkLfuLA linux-2.6.10-rc2-xen-sparse/mm/memory.c
+410a94a4KT6I6X0LVc7djB39tRDp4g linux-2.6.10-rc2-xen-sparse/mm/page_alloc.c
+41505c572m-s9ATiO1LiD1GPznTTIg linux-2.6.10-rc2-xen-sparse/net/core/skbuff.c
+4149ec79wMpIHdvbntxqVGLRZZjPxw linux-2.6.10-rc2-xen-sparse/net/ipv4/raw.c
+41ab6fa06JdF7jxUsuDcjN3UhuIAxg linux-2.6.9-xen-sparse/arch/xen/kernel/devmem.c
-40f56238xFQe9T7M_U_FItM-bZIpLw linux-2.6.9-xen-sparse/arch/xen/kernel/evtchn.c
-4110f478aeQWllIN7J4kouAHiAqrPw linux-2.6.9-xen-sparse/arch/xen/kernel/fixup.c
-412dfae9eA3_6e6bCGUtg1mj8b56fQ linux-2.6.9-xen-sparse/arch/xen/kernel/gnttab.c
-40f562392LBhwmOxVPsYdkYXMxI_ZQ linux-2.6.9-xen-sparse/arch/xen/kernel/reboot.c
-414c113396tK1HTVeUalm3u-1DF16g linux-2.6.9-xen-sparse/arch/xen/kernel/skbuff.c
-3f68905c5eiA-lBMQSvXLMWS1ikDEA linux-2.6.9-xen-sparse/arch/xen/kernel/xen_proc.c
-41261688yS8eAyy-7kzG4KBs0xbYCA linux-2.6.9-xen-sparse/drivers/Makefile
-4108f5c1WfTIrs0HZFeV39sttekCTw linux-2.6.9-xen-sparse/drivers/char/mem.c
-4111308bZAIzwf_Kzu6x1TZYZ3E0_Q linux-2.6.9-xen-sparse/drivers/char/tty_io.c
-40f56239Dp_vMTgz8TEbvo1hjHGc3w linux-2.6.9-xen-sparse/drivers/xen/Makefile
-41768fbcncpBQf8s2l2-CwoSNIZ9uA linux-2.6.9-xen-sparse/drivers/xen/balloon/Makefile
-3e6377f8i5e9eGz7Pw6fQuhuTQ7DQg linux-2.6.9-xen-sparse/drivers/xen/balloon/balloon.c
-410d0893otFGghmv4dUXDUBBdY5aIA linux-2.6.9-xen-sparse/drivers/xen/blkback/Makefile
-4087cf0d1XgMkooTZAiJS6NrcpLQNQ linux-2.6.9-xen-sparse/drivers/xen/blkback/blkback.c
-4087cf0dZadZ8r6CEt4fNN350Yle3A linux-2.6.9-xen-sparse/drivers/xen/blkback/common.h
-4087cf0dxlh29iw0w-9rxOCEGCjPcw linux-2.6.9-xen-sparse/drivers/xen/blkback/control.c
-4087cf0dbuoH20fMjNZjcgrRK-1msQ linux-2.6.9-xen-sparse/drivers/xen/blkback/interface.c
-4087cf0dk97tacDzxfByWV7JifUYqA linux-2.6.9-xen-sparse/drivers/xen/blkback/vbd.c
-40f56239Sfle6wGv5FS0wjS_HI150A linux-2.6.9-xen-sparse/drivers/xen/blkfront/Kconfig
-40f562395atl9x4suKGhPkjqLOXESg linux-2.6.9-xen-sparse/drivers/xen/blkfront/Makefile
-40f56239-JNIaTzlviVJohVdoYOUpw linux-2.6.9-xen-sparse/drivers/xen/blkfront/blkfront.c
-40f56239y9naBTXe40Pi2J_z3p-d1g linux-2.6.9-xen-sparse/drivers/xen/blkfront/block.h
-40f56239BVfPsXBiWQitXgDRtOsiqg linux-2.6.9-xen-sparse/drivers/xen/blkfront/vbd.c
-40f56239fsLjvtD8YBRAWphps4FDjg linux-2.6.9-xen-sparse/drivers/xen/console/Makefile
-3e5a4e651TH-SXHoufurnWjgl5bfOA linux-2.6.9-xen-sparse/drivers/xen/console/console.c
-40f56239KYxO0YabhPzCTeUuln-lnA linux-2.6.9-xen-sparse/drivers/xen/evtchn/Makefile
-40f56239DoibTX6R-ZYd3QTXAB8_TA linux-2.6.9-xen-sparse/drivers/xen/evtchn/evtchn.c
-410a9817HEVJvred5Oy_uKH3HFJC5Q linux-2.6.9-xen-sparse/drivers/xen/netback/Makefile
-4097ba831lpGeLlPg-bfV8XarVVuoQ linux-2.6.9-xen-sparse/drivers/xen/netback/common.h
-4097ba83wvv8yi5P5xugCUBAdb6O-A linux-2.6.9-xen-sparse/drivers/xen/netback/control.c
-4097ba83byY5bTSugJGZ1exTxIcMKw linux-2.6.9-xen-sparse/drivers/xen/netback/interface.c
-4087cf0dGmSbFhFZyIZBJzvqxY-qBw linux-2.6.9-xen-sparse/drivers/xen/netback/netback.c
-40f56239lrg_Ob0BJ8WBFS1zeg2CYw linux-2.6.9-xen-sparse/drivers/xen/netfront/Kconfig
-40f56239Wd4k_ycG_mFsSO1r5xKdtQ linux-2.6.9-xen-sparse/drivers/xen/netfront/Makefile
-405853f6nbeazrNyEWNHBuoSg2PiPA linux-2.6.9-xen-sparse/drivers/xen/netfront/netfront.c
-4108f5c1ppFXVpQzCOAZ6xXYubsjKA linux-2.6.9-xen-sparse/drivers/xen/privcmd/Makefile
-3e5a4e65IUfzzMu2kZFlGEB8-rpTaA linux-2.6.9-xen-sparse/drivers/xen/privcmd/privcmd.c
-412f47e4RKD-R5IS5gEXvcT8L4v8gA linux-2.6.9-xen-sparse/include/asm-generic/pgtable.h
-40f56239YAjS52QG2FIAQpHDZAdGHg linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/desc.h
-4107adf1E5O4ztGHNGMzCCNhcvqNow linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h
-40f5623akIoBsQ3KxSB2kufkbgONXQ linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/fixmap.h
-41979925z1MsKU1SfuuheM1IFDQ_bA linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/floppy.h
-4118b6a418gnL6AZsTdglC92YGqYTg linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/highmem.h
-40f5623aJVXQwpJMOLE99XgvGsfQ8Q linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/io.h
-40f5623aKXkBBxgpLx2NcvkncQ1Yyw linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h
-40f5623aDMCsWOFO0jktZ4e8sjwvEg linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h
-40f5623arsFXkGdPvIqvFi3yFXGR0Q linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h
-4120f807GCO0uqsLqdZj9csxR1Wthw linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/mmu_context.h
-40f5623aFTyFTR-vdiA-KaGxk5JOKQ linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/msr.h
-40f5623adgjZq9nAgCt0IXdWl7udSA linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/page.h
-40f5623a54NuG-7qHihGYmw4wWQnMA linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/param.h
-41137cc1kkvg0cg7uxddcEfjL7L67w linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pci.h
-40f5623atCokYc2uCysSJ8jFO8TEsw linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgalloc.h
-412e01beTwiaC8sYY4XJP8PxLST5CA linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgtable-2level-defs.h
-40f5623aEToIXouJgO-ao5d5pcEt1w linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h
-40f5623aCCXRPlGpNthVXstGz9ZV3A linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/pgtable.h
-40f5623aPCkQQfPtJSooGdhcatrvnQ linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/processor.h
-412ea0afQL2CAI-f522TbLjLPMibPQ linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/ptrace.h
-40f5623bzLvxr7WoJIxVf2OH4rCBJg linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/segment.h
-40f5623bG_LzgG6-qwk292nTc5Wabw linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/setup.h
-40f5623bgzm_9vwxpzJswlAxg298Gg linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h
-40f5623bVdKP7Dt7qm8twu3NcnGNbA linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/system.h
-40f5623bc8LKPRO09wY5dGDnY_YCpw linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/tlbflush.h
-41062ab7uFxnCq-KtPeAm-aV8CicgA linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/vga.h
-40f5623bxUbeGjkRrjDguCy_Gm8RLw linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/xor.h
+41af4017PDMuSmMWtSRU5UC9Vylw5g linux-2.6.9-xen-sparse/include/asm-xen/balloon.h
-40f5623bYNP7tHE2zX6YQxp9Zq2utQ linux-2.6.9-xen-sparse/include/asm-xen/ctrl_if.h
-40f5623b3Eqs8pAc5WpPX8_jTzV2qw linux-2.6.9-xen-sparse/include/asm-xen/evtchn.h
-419b4e9367PjTEvdjwavWN12BeBBXg linux-2.6.9-xen-sparse/include/asm-xen/foreign_page.h
-412dfaeazclyNDM0cpnp60Yo4xulpQ linux-2.6.9-xen-sparse/include/asm-xen/gnttab.h
-40f5623aGPlsm0u1LTO-NVZ6AGzNRQ linux-2.6.9-xen-sparse/include/asm-xen/hypervisor.h
-3f108af1ylCIm82H052FVTfXACBHrw linux-2.6.9-xen-sparse/include/asm-xen/linux-public/privcmd.h
-3fa8e3f0kBLeE4To2vpdi3cpJbIkbQ linux-2.6.9-xen-sparse/include/asm-xen/linux-public/suspend.h
-40f5623cndVUFlkxpf7Lfx7xu8madQ linux-2.6.9-xen-sparse/include/asm-xen/multicall.h
-4122466356eIBnC9ot44WSVVIFyhQA linux-2.6.9-xen-sparse/include/asm-xen/queues.h
-3f689063BoW-HWV3auUJ-OqXfcGArw linux-2.6.9-xen-sparse/include/asm-xen/xen_proc.h
-4124d8c4aocX7A-jIbuGraWN84pxGQ linux-2.6.9-xen-sparse/include/linux/bio.h
-419b4e93z2S0gR17XTy8wg09JEwAhg linux-2.6.9-xen-sparse/include/linux/gfp.h
-4124f66f4NaKNa0xPiGGykn9QaZk3w linux-2.6.9-xen-sparse/include/linux/skbuff.h
-40f56a0ddHCSs3501MY4hRf22tctOw linux-2.6.9-xen-sparse/mkbuildtree
-412f46c0LJuKAgSPGoC0Z1DEkLfuLA linux-2.6.9-xen-sparse/mm/memory.c
-410a94a4KT6I6X0LVc7djB39tRDp4g linux-2.6.9-xen-sparse/mm/page_alloc.c
-41505c572m-s9ATiO1LiD1GPznTTIg linux-2.6.9-xen-sparse/net/core/skbuff.c
-4149ec79wMpIHdvbntxqVGLRZZjPxw linux-2.6.9-xen-sparse/net/ipv4/raw.c
413cb1e4zst25MDYjg63Y-NGC5_pLg netbsd-2.0-xen-sparse/Makefile
413cb1e5c_Mkxf_X0zimEhTKI_l4DA netbsd-2.0-xen-sparse/mkbuildtree
413cb1e5kY_Zil7-b0kI6hvCIxBEYg netbsd-2.0-xen-sparse/nbconfig-xen
--- /dev/null
+++ linux-2.6.10-rc2-xen-sparse/arch/xen/configs/xen0_defconfig
- # Fri Nov 19 20:16:38 2004
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.10-rc2-xen0
- # CONFIG_DEBUG_SLAB is not set
++# Wed Dec 1 09:22:49 2004
+#
+CONFIG_XEN=y
+CONFIG_ARCH_XEN=y
+CONFIG_NO_IDLE_HZ=y
+
+#
+# XEN
+#
+CONFIG_XEN_PRIVILEGED_GUEST=y
+CONFIG_XEN_PHYSDEV_ACCESS=y
+CONFIG_XEN_BLKDEV_BACKEND=y
+CONFIG_XEN_NETDEV_BACKEND=y
+CONFIG_XEN_BLKDEV_FRONTEND=y
+CONFIG_XEN_NETDEV_FRONTEND=y
+# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
+CONFIG_XEN_WRITABLE_PAGETABLES=y
+CONFIG_XEN_SCRUB_PAGES=y
+CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
+CONFIG_X86=y
+# CONFIG_X86_64 is not set
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+# CONFIG_CLEAN_COMPILE is not set
+CONFIG_BROKEN=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_HOTPLUG=y
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# X86 Processor Configuration
+#
+CONFIG_XENARCH="i386"
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_IOMAP=y
+# CONFIG_M686 is not set
+# CONFIG_MPENTIUMII is not set
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUMM is not set
+CONFIG_MPENTIUM4=y
+# CONFIG_MK6 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MCRUSOE is not set
+# CONFIG_MCYRIXIII is not set
+# CONFIG_MVIAC3_2 is not set
+# CONFIG_X86_GENERIC is not set
+CONFIG_X86_CMPXCHG=y
+CONFIG_X86_XADD=y
+CONFIG_X86_L1_CACHE_SHIFT=7
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_X86_WP_WORKS_OK=y
+CONFIG_X86_INVLPG=y
+CONFIG_X86_BSWAP=y
+CONFIG_X86_POPAD_OK=y
+CONFIG_X86_GOOD_APIC=y
+CONFIG_X86_INTEL_USERCOPY=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
+# CONFIG_HPET_TIMER is not set
+# CONFIG_HPET_EMULATE_RTC is not set
+# CONFIG_SMP is not set
+CONFIG_PREEMPT=y
+CONFIG_X86_CPUID=y
+
+#
+# Firmware Drivers
+#
+# CONFIG_EDD is not set
+CONFIG_NOHIGHMEM=y
+# CONFIG_HIGHMEM4G is not set
+CONFIG_MTRR=y
+CONFIG_HAVE_DEC_LOCK=y
+# CONFIG_REGPARM is not set
+
+#
+# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
+#
+CONFIG_PCI=y
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_LEGACY_PROC=y
+# CONFIG_PCI_NAMES is not set
+CONFIG_ISA=y
+# CONFIG_EISA is not set
+# CONFIG_MCA is not set
+# CONFIG_SCx200 is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# PC-card bridges
+#
+CONFIG_PCMCIA_PROBE=y
+
+#
+# PCI Hotplug Support
+#
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# Kernel hacking
+#
+CONFIG_DEBUG_KERNEL=y
+CONFIG_EARLY_PRINTK=y
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
- # CONFIG_DEBUG_PAGEALLOC is not set
++CONFIG_DEBUG_SLAB=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_DEBUG_SPINLOCK is not set
++CONFIG_DEBUG_PAGEALLOC=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_4KSTACKS is not set
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_X86_BIOS_REBOOT=y
+CONFIG_PC=y
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+# CONFIG_STANDALONE is not set
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+# CONFIG_PNP is not set
+
+#
+# Block devices
+#
+CONFIG_BLK_DEV_FD=y
+# CONFIG_BLK_DEV_XD is not set
+# CONFIG_BLK_CPQ_DA is not set
+CONFIG_BLK_CPQ_CISS_DA=y
+# CONFIG_CISS_SCSI_TAPE is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_LBD is not set
+# CONFIG_CDROM_PKTCDVD is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_IDE_SATA is not set
+# CONFIG_BLK_DEV_HD_IDE is not set
+CONFIG_BLK_DEV_IDEDISK=y
+# CONFIG_IDEDISK_MULTI_MODE is not set
+CONFIG_BLK_DEV_IDECD=y
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_BLK_DEV_IDEFLOPPY is not set
+# CONFIG_BLK_DEV_IDESCSI is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_IDE_GENERIC=y
+# CONFIG_BLK_DEV_CMD640 is not set
+CONFIG_BLK_DEV_IDEPCI=y
+# CONFIG_IDEPCI_SHARE_IRQ is not set
+# CONFIG_BLK_DEV_OFFBOARD is not set
+CONFIG_BLK_DEV_GENERIC=y
+# CONFIG_BLK_DEV_OPTI621 is not set
+# CONFIG_BLK_DEV_RZ1000 is not set
+CONFIG_BLK_DEV_IDEDMA_PCI=y
+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+CONFIG_IDEDMA_PCI_AUTO=y
+# CONFIG_IDEDMA_ONLYDISK is not set
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+# CONFIG_BLK_DEV_ATIIXP is not set
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CY82C693 is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_HPT34X is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+CONFIG_BLK_DEV_PIIX=y
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+CONFIG_BLK_DEV_SVWKS=y
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SIS5513 is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+# CONFIG_BLK_DEV_VIA82CXXX is not set
+# CONFIG_IDE_ARM is not set
+# CONFIG_IDE_CHIPSETS is not set
+CONFIG_BLK_DEV_IDEDMA=y
+# CONFIG_IDEDMA_IVB is not set
+CONFIG_IDEDMA_AUTO=y
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+CONFIG_BLK_DEV_3W_XXXX_RAID=y
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_7000FASST is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AHA152X is not set
+# CONFIG_SCSI_AHA1542 is not set
+CONFIG_SCSI_AACRAID=y
+CONFIG_SCSI_AIC7XXX=y
+CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+CONFIG_AIC7XXX_DEBUG_ENABLE=y
+CONFIG_AIC7XXX_DEBUG_MASK=0
+CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+CONFIG_SCSI_AIC79XX=y
+CONFIG_AIC79XX_CMDS_PER_DEVICE=32
+CONFIG_AIC79XX_RESET_DELAY_MS=15000
+# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
+CONFIG_AIC79XX_DEBUG_ENABLE=y
+CONFIG_AIC79XX_DEBUG_MASK=0
+CONFIG_AIC79XX_REG_PRETTY_PRINT=y
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_IN2000 is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_LEGACY is not set
+CONFIG_SCSI_SATA=y
+# CONFIG_SCSI_SATA_AHCI is not set
+# CONFIG_SCSI_SATA_SVW is not set
+CONFIG_SCSI_ATA_PIIX=y
+# CONFIG_SCSI_SATA_NV is not set
+CONFIG_SCSI_SATA_PROMISE=y
+CONFIG_SCSI_SATA_SX4=y
+CONFIG_SCSI_SATA_SIL=y
+# CONFIG_SCSI_SATA_SIS is not set
+# CONFIG_SCSI_SATA_ULI is not set
+# CONFIG_SCSI_SATA_VIA is not set
+# CONFIG_SCSI_SATA_VITESSE is not set
+CONFIG_SCSI_BUSLOGIC=y
+# CONFIG_SCSI_OMIT_FLASHPOINT is not set
+# CONFIG_SCSI_CPQFCTS is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_EATA_PIO is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_PCI2000 is not set
+# CONFIG_SCSI_PCI2220I is not set
+# CONFIG_SCSI_PSI240I is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_QLOGIC_ISP is not set
+# CONFIG_SCSI_QLOGIC_FC is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_QLOGIC_1280_1040 is not set
+CONFIG_SCSI_QLA2XXX=y
+# CONFIG_SCSI_QLA21XX is not set
+# CONFIG_SCSI_QLA22XX is not set
+# CONFIG_SCSI_QLA2300 is not set
+# CONFIG_SCSI_QLA2322 is not set
+# CONFIG_SCSI_QLA6312 is not set
+# CONFIG_SCSI_QLA6322 is not set
+# CONFIG_SCSI_SEAGATE is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_ULTRASTOR is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Old CD-ROM drivers (not SCSI, not IDE)
+#
+# CONFIG_CD_NO_IDESCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+# CONFIG_MD_LINEAR is not set
+CONFIG_MD_RAID0=y
+CONFIG_MD_RAID1=y
+# CONFIG_MD_RAID10 is not set
+CONFIG_MD_RAID5=y
+# CONFIG_MD_RAID6 is not set
+# CONFIG_MD_MULTIPATH is not set
+# CONFIG_MD_FAULTY is not set
+CONFIG_BLK_DEV_DM=y
+# CONFIG_DM_CRYPT is not set
+CONFIG_DM_SNAPSHOT=y
+CONFIG_DM_MIRROR=y
+# CONFIG_DM_ZERO is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+# CONFIG_NETLINK_DEV is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_IP_TCPDIAG=y
+# CONFIG_IP_TCPDIAG_IPV6 is not set
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+CONFIG_IP_NF_CT_ACCT=y
+# CONFIG_IP_NF_CONNTRACK_MARK is not set
+# CONFIG_IP_NF_CT_PROTO_SCTP is not set
+CONFIG_IP_NF_FTP=m
+# CONFIG_IP_NF_IRC is not set
+# CONFIG_IP_NF_TFTP is not set
+# CONFIG_IP_NF_AMANDA is not set
+# CONFIG_IP_NF_QUEUE is not set
+CONFIG_IP_NF_IPTABLES=m
+# CONFIG_IP_NF_MATCH_LIMIT is not set
+# CONFIG_IP_NF_MATCH_IPRANGE is not set
+# CONFIG_IP_NF_MATCH_MAC is not set
+# CONFIG_IP_NF_MATCH_PKTTYPE is not set
+# CONFIG_IP_NF_MATCH_MARK is not set
+# CONFIG_IP_NF_MATCH_MULTIPORT is not set
+# CONFIG_IP_NF_MATCH_TOS is not set
+# CONFIG_IP_NF_MATCH_RECENT is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_DSCP is not set
+# CONFIG_IP_NF_MATCH_AH_ESP is not set
+# CONFIG_IP_NF_MATCH_LENGTH is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+# CONFIG_IP_NF_MATCH_TCPMSS is not set
+# CONFIG_IP_NF_MATCH_HELPER is not set
+# CONFIG_IP_NF_MATCH_STATE is not set
+# CONFIG_IP_NF_MATCH_CONNTRACK is not set
+# CONFIG_IP_NF_MATCH_OWNER is not set
+# CONFIG_IP_NF_MATCH_PHYSDEV is not set
+# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
+# CONFIG_IP_NF_MATCH_REALM is not set
+# CONFIG_IP_NF_MATCH_SCTP is not set
+# CONFIG_IP_NF_MATCH_COMMENT is not set
+# CONFIG_IP_NF_MATCH_HASHLIMIT is not set
+# CONFIG_IP_NF_FILTER is not set
+# CONFIG_IP_NF_TARGET_LOG is not set
+# CONFIG_IP_NF_TARGET_ULOG is not set
+# CONFIG_IP_NF_TARGET_TCPMSS is not set
+# CONFIG_IP_NF_NAT is not set
+# CONFIG_IP_NF_MANGLE is not set
+# CONFIG_IP_NF_RAW is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+# CONFIG_IP_NF_COMPAT_IPCHAINS is not set
+# CONFIG_IP_NF_COMPAT_IPFWADM is not set
+
+#
+# Bridge: Netfilter Configuration
+#
+# CONFIG_BRIDGE_NF_EBTABLES is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+CONFIG_BRIDGE=y
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+# CONFIG_NET_CLS_ROUTE is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+CONFIG_NET_VENDOR_3COM=y
+# CONFIG_EL1 is not set
+# CONFIG_EL2 is not set
+# CONFIG_ELPLUS is not set
+# CONFIG_EL16 is not set
+# CONFIG_EL3 is not set
+# CONFIG_3C515 is not set
+CONFIG_VORTEX=y
+# CONFIG_TYPHOON is not set
+# CONFIG_LANCE is not set
+# CONFIG_NET_VENDOR_SMC is not set
+# CONFIG_NET_VENDOR_RACAL is not set
+
+#
+# Tulip family network device support
+#
+CONFIG_NET_TULIP=y
+# CONFIG_DE2104X is not set
+CONFIG_TULIP=y
+# CONFIG_TULIP_MWI is not set
+# CONFIG_TULIP_MMIO is not set
+# CONFIG_TULIP_NAPI is not set
+# CONFIG_DE4X5 is not set
+# CONFIG_WINBOND_840 is not set
+# CONFIG_DM9102 is not set
+# CONFIG_AT1700 is not set
+# CONFIG_DEPCA is not set
+# CONFIG_HP100 is not set
+# CONFIG_NET_ISA is not set
+CONFIG_NET_PCI=y
+CONFIG_PCNET32=y
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_AC3200 is not set
+# CONFIG_APRICOT is not set
+# CONFIG_B44 is not set
+# CONFIG_FORCEDETH is not set
+# CONFIG_CS89x0 is not set
+# CONFIG_DGRS is not set
+# CONFIG_EEPRO100 is not set
+CONFIG_E100=y
+# CONFIG_E100_NAPI is not set
+# CONFIG_FEALNX is not set
+# CONFIG_NATSEMI is not set
+# CONFIG_NE2K_PCI is not set
+# CONFIG_8139CP is not set
+CONFIG_8139TOO=y
+CONFIG_8139TOO_PIO=y
+# CONFIG_8139TOO_TUNE_TWISTER is not set
+# CONFIG_8139TOO_8129 is not set
+# CONFIG_8139_OLD_RX_RESET is not set
+# CONFIG_SIS900 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_TLAN is not set
+CONFIG_VIA_RHINE=y
+# CONFIG_VIA_RHINE_MMIO is not set
+# CONFIG_NET_POCKET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+CONFIG_ACENIC=y
+# CONFIG_ACENIC_OMIT_TIGON_I is not set
+# CONFIG_DL2K is not set
+CONFIG_E1000=y
+# CONFIG_E1000_NAPI is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_VIA_VELOCITY is not set
+CONFIG_TIGON3=y
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NET_FC is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PCIPS2 is not set
+# CONFIG_SERIO_RAW is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_INPORT is not set
+# CONFIG_MOUSE_LOGIBM is not set
+# CONFIG_MOUSE_PC110PAD is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_RTC is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_SONYPI is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_MWAVE is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_HANGCHECK_TIMER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Misc devices
+#
+# CONFIG_IBM_ASM is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+# CONFIG_VIDEO_SELECT is not set
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+# CONFIG_MDA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB is not set
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+CONFIG_REISERFS_FS=y
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V4 is not set
+CONFIG_NFSD_TCP=y
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_SHA1=m
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+CONFIG_CRYPTO_DES=m
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES_586 is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=y
+CONFIG_ZLIB_INFLATE=y
--- /dev/null
+++ linux-2.6.10-rc2-xen-sparse/arch/xen/configs/xenU_defconfig
- # Fri Nov 19 20:16:52 2004
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.10-rc2-xenU
- # CONFIG_DEBUG_KERNEL is not set
++# Wed Dec 1 09:22:09 2004
+#
+CONFIG_XEN=y
+CONFIG_ARCH_XEN=y
+CONFIG_NO_IDLE_HZ=y
+
+#
+# XEN
+#
+# CONFIG_XEN_PRIVILEGED_GUEST is not set
+# CONFIG_XEN_PHYSDEV_ACCESS is not set
+# CONFIG_XEN_BLKDEV_BACKEND is not set
+# CONFIG_XEN_NETDEV_BACKEND is not set
+CONFIG_XEN_BLKDEV_FRONTEND=y
+CONFIG_XEN_NETDEV_FRONTEND=y
+# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
+CONFIG_XEN_WRITABLE_PAGETABLES=y
+CONFIG_XEN_SCRUB_PAGES=y
+CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
+CONFIG_X86=y
+# CONFIG_X86_64 is not set
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_HOTPLUG=y
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# X86 Processor Configuration
+#
+CONFIG_XENARCH="i386"
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_IOMAP=y
+# CONFIG_M686 is not set
+# CONFIG_MPENTIUMII is not set
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUMM is not set
+CONFIG_MPENTIUM4=y
+# CONFIG_MK6 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MCRUSOE is not set
+# CONFIG_MCYRIXIII is not set
+# CONFIG_MVIAC3_2 is not set
+# CONFIG_X86_GENERIC is not set
+CONFIG_X86_CMPXCHG=y
+CONFIG_X86_XADD=y
+CONFIG_X86_L1_CACHE_SHIFT=7
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_X86_WP_WORKS_OK=y
+CONFIG_X86_INVLPG=y
+CONFIG_X86_BSWAP=y
+CONFIG_X86_POPAD_OK=y
+CONFIG_X86_GOOD_APIC=y
+CONFIG_X86_INTEL_USERCOPY=y
+CONFIG_X86_USE_PPRO_CHECKSUM=y
+# CONFIG_HPET_TIMER is not set
+# CONFIG_HPET_EMULATE_RTC is not set
+# CONFIG_SMP is not set
+CONFIG_PREEMPT=y
+CONFIG_X86_CPUID=y
+
+#
+# Firmware Drivers
+#
+# CONFIG_EDD is not set
+CONFIG_NOHIGHMEM=y
+# CONFIG_HIGHMEM4G is not set
+CONFIG_HAVE_DEC_LOCK=y
+# CONFIG_REGPARM is not set
+
+#
+# Kernel hacking
+#
++CONFIG_DEBUG_KERNEL=y
+CONFIG_EARLY_PRINTK=y
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++CONFIG_DEBUG_SLAB=y
++# CONFIG_MAGIC_SYSRQ is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++CONFIG_DEBUG_PAGEALLOC=y
++# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_4KSTACKS is not set
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_X86_BIOS_REBOOT=y
+CONFIG_PC=y
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_LBD is not set
+# CONFIG_CDROM_PKTCDVD is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=m
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_QLOGIC_1280_1040 is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+# CONFIG_NETLINK_DEV is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_IP_TCPDIAG=y
+# CONFIG_IP_TCPDIAG_IPV6 is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+# CONFIG_NET_CLS_ROUTE is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+# CONFIG_NET_ETHERNET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+CONFIG_UNIX98_PTYS=y
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+CONFIG_REISERFS_FS=y
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_REISERFS_FS_XATTR is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+CONFIG_AUTOFS_FS=y
+CONFIG_AUTOFS4_FS=y
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+CONFIG_DEVPTS_FS_XATTR=y
+# CONFIG_DEVPTS_FS_SECURITY is not set
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+# CONFIG_EXPORTFS is not set
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=m
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES_586 is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC32 is not set
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
--- /dev/null
- protect_page(swapper_pg_dir, (void *)va, PROT_ON);
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <asm/semaphore.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/msr.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm-xen/hypervisor.h>
+
+#include "cpu.h"
+
+DEFINE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
+EXPORT_PER_CPU_SYMBOL(cpu_gdt_table);
+
+static int cachesize_override __initdata = -1;
+static int disable_x86_fxsr __initdata = 0;
+static int disable_x86_serial_nr __initdata = 1;
+
+struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
+
+extern void mcheck_init(struct cpuinfo_x86 *c);
+
+extern void machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c);
+
+extern int disable_pse;
+
+static void default_init(struct cpuinfo_x86 * c)
+{
+ /* Not much we can do here... */
+ /* Check if at least it has cpuid */
+ if (c->cpuid_level == -1) {
+ /* No cpuid. It must be an ancient CPU */
+ if (c->x86 == 4)
+ strcpy(c->x86_model_id, "486");
+ else if (c->x86 == 3)
+ strcpy(c->x86_model_id, "386");
+ }
+}
+
+static struct cpu_dev default_cpu = {
+ .c_init = default_init,
+};
+static struct cpu_dev * this_cpu = &default_cpu;
+
+static int __init cachesize_setup(char *str)
+{
+ get_option (&str, &cachesize_override);
+ return 1;
+}
+__setup("cachesize=", cachesize_setup);
+
+int __init get_model_name(struct cpuinfo_x86 *c)
+{
+ unsigned int *v;
+ char *p, *q;
+
+ if (cpuid_eax(0x80000000) < 0x80000004)
+ return 0;
+
+ v = (unsigned int *) c->x86_model_id;
+ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
+ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
+ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
+ c->x86_model_id[48] = 0;
+
+ /* Intel chips right-justify this string for some dumb reason;
+ undo that brain damage */
+ p = q = &c->x86_model_id[0];
+ while ( *p == ' ' )
+ p++;
+ if ( p != q ) {
+ while ( *p )
+ *q++ = *p++;
+ while ( q <= &c->x86_model_id[48] )
+ *q++ = '\0'; /* Zero-pad the rest */
+ }
+
+ return 1;
+}
+
+
+void __init display_cacheinfo(struct cpuinfo_x86 *c)
+{
+ unsigned int n, dummy, ecx, edx, l2size;
+
+ n = cpuid_eax(0x80000000);
+
+ if (n >= 0x80000005) {
+ cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
+ printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
+ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
+ c->x86_cache_size=(ecx>>24)+(edx>>24);
+ }
+
+	if (n < 0x80000006)	/* Some chips just have a large L1. */
+ return;
+
+ ecx = cpuid_ecx(0x80000006);
+ l2size = ecx >> 16;
+
+ /* do processor-specific cache resizing */
+ if (this_cpu->c_size_cache)
+ l2size = this_cpu->c_size_cache(c,l2size);
+
+ /* Allow user to override all this if necessary. */
+ if (cachesize_override != -1)
+ l2size = cachesize_override;
+
+ if ( l2size == 0 )
+ return; /* Again, no L2 cache is possible */
+
+ c->x86_cache_size = l2size;
+
+ printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
+ l2size, ecx & 0xFF);
+}
+
+/* Naming convention should be: <Name> [(<Codename>)] */
+/* This table is only used if init_<vendor>() below doesn't set the model name; */
+/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
+
+/* Look up CPU names by table lookup. */
+static char __init *table_lookup_model(struct cpuinfo_x86 *c)
+{
+ struct cpu_model_info *info;
+
+ if ( c->x86_model >= 16 )
+ return NULL; /* Range check */
+
+ if (!this_cpu)
+ return NULL;
+
+ info = this_cpu->c_models;
+
+ while (info && info->family) {
+ if (info->family == c->x86)
+ return info->model_names[c->x86_model];
+ info++;
+ }
+ return NULL; /* Not found */
+}
+
+
+void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+{
+ char *v = c->x86_vendor_id;
+ int i;
+
+ for (i = 0; i < X86_VENDOR_NUM; i++) {
+ if (cpu_devs[i]) {
+ if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
+ (cpu_devs[i]->c_ident[1] &&
+ !strcmp(v,cpu_devs[i]->c_ident[1]))) {
+ c->x86_vendor = i;
+ if (!early)
+ this_cpu = cpu_devs[i];
+ break;
+ }
+ }
+ }
+}
+
+
+static int __init x86_fxsr_setup(char * s)
+{
+ disable_x86_fxsr = 1;
+ return 1;
+}
+__setup("nofxsr", x86_fxsr_setup);
+
+
+/* Standard macro to see if a specific flag is changeable */
+static inline int flag_is_changeable_p(u32 flag)
+{
+ u32 f1, f2;
+
+ asm("pushfl\n\t"
+ "pushfl\n\t"
+ "popl %0\n\t"
+ "movl %0,%1\n\t"
+ "xorl %2,%0\n\t"
+ "pushl %0\n\t"
+ "popfl\n\t"
+ "pushfl\n\t"
+ "popl %0\n\t"
+ "popfl\n\t"
+ : "=&r" (f1), "=&r" (f2)
+ : "ir" (flag));
+
+ return ((f1^f2) & flag) != 0;
+}
+
+
+/* Probe for the CPUID instruction */
+int __init have_cpuid_p(void)
+{
+ return flag_is_changeable_p(X86_EFLAGS_ID);
+}
+
+/* Do minimum CPU detection early.
+ Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
+ The others are not touched to avoid unwanted side effects. */
+void __init early_cpu_detect(void)
+{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+
+ c->x86_cache_alignment = 32;
+
+ if (!have_cpuid_p())
+ return;
+
+ /* Get vendor name */
+ cpuid(0x00000000, &c->cpuid_level,
+ (int *)&c->x86_vendor_id[0],
+ (int *)&c->x86_vendor_id[8],
+ (int *)&c->x86_vendor_id[4]);
+
+ get_cpu_vendor(c, 1);
+
+ c->x86 = 4;
+ if (c->cpuid_level >= 0x00000001) {
+ u32 junk, tfms, cap0, misc;
+ cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
+ c->x86 = (tfms >> 8) & 15;
+ c->x86_model = (tfms >> 4) & 15;
+ if (c->x86 == 0xf) {
+ c->x86 += (tfms >> 20) & 0xff;
+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
+ }
+ c->x86_mask = tfms & 15;
+ if (cap0 & (1<<19))
+ c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
+ }
+
+ early_intel_workaround(c);
+}
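The family/model decode above is easy to misread: family 0xf switches CPUID to its extended encoding, where extra bits of the signature word extend both fields. A minimal stand-alone sketch (not part of the patch) of the same bit arithmetic, using an assumed example signature value:

	#include <stdio.h>

	int main(void)
	{
		unsigned int tfms = 0x00000f29;	/* assumed example signature */
		unsigned int family = (tfms >> 8) & 15;
		unsigned int model  = (tfms >> 4) & 15;
		unsigned int mask   = tfms & 15;

		if (family == 0xf) {	/* extended family/model encoding */
			family += (tfms >> 20) & 0xff;
			model  += ((tfms >> 16) & 0xf) << 4;
		}
		/* prints: family 15, model 2, stepping 9 */
		printf("family %u, model %u, stepping %u\n", family, model, mask);
		return 0;
	}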
+
+void __init generic_identify(struct cpuinfo_x86 * c)
+{
+ u32 tfms, xlvl;
+ int junk;
+
+ if (have_cpuid_p()) {
+ /* Get vendor name */
+ cpuid(0x00000000, &c->cpuid_level,
+ (int *)&c->x86_vendor_id[0],
+ (int *)&c->x86_vendor_id[8],
+ (int *)&c->x86_vendor_id[4]);
+
+ get_cpu_vendor(c, 0);
+ /* Initialize the standard set of capabilities */
+ /* Note that the vendor-specific code below might override */
+
+ /* Intel-defined flags: level 0x00000001 */
+ if ( c->cpuid_level >= 0x00000001 ) {
+ u32 capability, excap;
+ cpuid(0x00000001, &tfms, &junk, &excap, &capability);
+ c->x86_capability[0] = capability;
+ c->x86_capability[4] = excap;
+ c->x86 = (tfms >> 8) & 15;
+ c->x86_model = (tfms >> 4) & 15;
+ if (c->x86 == 0xf) {
+ c->x86 += (tfms >> 20) & 0xff;
+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
+ }
+ c->x86_mask = tfms & 15;
+ } else {
+ /* Have CPUID level 0 only - unheard of */
+ c->x86 = 4;
+ }
+
+ /* AMD-defined flags: level 0x80000001 */
+ xlvl = cpuid_eax(0x80000000);
+ if ( (xlvl & 0xffff0000) == 0x80000000 ) {
+ if ( xlvl >= 0x80000001 )
+ c->x86_capability[1] = cpuid_edx(0x80000001);
+ if ( xlvl >= 0x80000004 )
+ get_model_name(c); /* Default name */
+ }
+ }
+}
+
+static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+{
+ if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
+ /* Disable processor serial number */
+ unsigned long lo,hi;
+ rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+ lo |= 0x200000;
+ wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+ printk(KERN_NOTICE "CPU serial number disabled.\n");
+ clear_bit(X86_FEATURE_PN, c->x86_capability);
+
+ /* Disabling the serial number may affect the cpuid level */
+ c->cpuid_level = cpuid_eax(0);
+ }
+}
+
+static int __init x86_serial_nr_setup(char *s)
+{
+ disable_x86_serial_nr = 0;
+ return 1;
+}
+__setup("serialnumber", x86_serial_nr_setup);
+
+
+
+/*
+ * This does the hard work of actually picking apart the CPU stuff...
+ */
+void __init identify_cpu(struct cpuinfo_x86 *c)
+{
+ int i;
+
+ c->loops_per_jiffy = loops_per_jiffy;
+ c->x86_cache_size = -1;
+ c->x86_vendor = X86_VENDOR_UNKNOWN;
+ c->cpuid_level = -1; /* CPUID not detected */
+ c->x86_model = c->x86_mask = 0; /* So far unknown... */
+ c->x86_vendor_id[0] = '\0'; /* Unset */
+ c->x86_model_id[0] = '\0'; /* Unset */
+ memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
+ if (!have_cpuid_p()) {
+ /* First of all, decide if this is a 486 or higher */
+ /* It's a 486 if we can modify the AC flag */
+ if ( flag_is_changeable_p(X86_EFLAGS_AC) )
+ c->x86 = 4;
+ else
+ c->x86 = 3;
+ }
+
+ generic_identify(c);
+
+ printk(KERN_DEBUG "CPU: After generic identify, caps: %08lx %08lx %08lx %08lx\n",
+ c->x86_capability[0],
+ c->x86_capability[1],
+ c->x86_capability[2],
+ c->x86_capability[3]);
+
+ if (this_cpu->c_identify) {
+ this_cpu->c_identify(c);
+
+ printk(KERN_DEBUG "CPU: After vendor identify, caps: %08lx %08lx %08lx %08lx\n",
+ c->x86_capability[0],
+ c->x86_capability[1],
+ c->x86_capability[2],
+ c->x86_capability[3]);
+	}
+
+ /*
+ * Vendor-specific initialization. In this section we
+ * canonicalize the feature flags, meaning if there are
+ * features a certain CPU supports which CPUID doesn't
+ * tell us, CPUID claiming incorrect flags, or other bugs,
+ * we handle them here.
+ *
+ * At the end of this section, c->x86_capability better
+ * indicate the features this CPU genuinely supports!
+ */
+ if (this_cpu->c_init)
+ this_cpu->c_init(c);
+
+ /* Disable the PN if appropriate */
+ squash_the_stupid_serial_number(c);
+
+ /*
+ * The vendor-specific functions might have changed features. Now
+ * we do "generic changes."
+ */
+
+ /* TSC disabled? */
+ if ( tsc_disable )
+ clear_bit(X86_FEATURE_TSC, c->x86_capability);
+
+ /* FXSR disabled? */
+ if (disable_x86_fxsr) {
+ clear_bit(X86_FEATURE_FXSR, c->x86_capability);
+ clear_bit(X86_FEATURE_XMM, c->x86_capability);
+ }
+
+ if (disable_pse)
+ clear_bit(X86_FEATURE_PSE, c->x86_capability);
+
+ /* If the model name is still unset, do table lookup. */
+ if ( !c->x86_model_id[0] ) {
+ char *p;
+ p = table_lookup_model(c);
+ if ( p )
+ strcpy(c->x86_model_id, p);
+ else
+ /* Last resort... */
+ sprintf(c->x86_model_id, "%02x/%02x",
+ c->x86_vendor, c->x86_model);
+ }
+
+ machine_specific_modify_cpu_capabilities(c);
+
+ /* Now the feature flags better reflect actual CPU features! */
+
+ printk(KERN_DEBUG "CPU: After all inits, caps: %08lx %08lx %08lx %08lx\n",
+ c->x86_capability[0],
+ c->x86_capability[1],
+ c->x86_capability[2],
+ c->x86_capability[3]);
+
+ /*
+ * On SMP, boot_cpu_data holds the common feature set between
+ * all CPUs; so make sure that we indicate which features are
+ * common between the CPUs. The first time this routine gets
+ * executed, c == &boot_cpu_data.
+ */
+ if ( c != &boot_cpu_data ) {
+ /* AND the already accumulated flags with these */
+ for ( i = 0 ; i < NCAPINTS ; i++ )
+ boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
+ }
+
+ /* Init Machine Check Exception if available. */
+#ifdef CONFIG_X86_MCE
+ mcheck_init(c);
+#endif
+}
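Note the AND-accumulation at the end of identify_cpu(): after the boot CPU has populated boot_cpu_data, every secondary CPU strips from it any feature it lacks, so boot_cpu_data ends up describing only the common feature set. A stand-alone sketch with made-up capability words:

	#include <stdio.h>

	#define NCAPINTS 2	/* the real kernel uses more capability words */

	int main(void)
	{
		unsigned long boot_caps[NCAPINTS] = { 0x0383fbff, 0x00000001 };
		unsigned long ap_caps[NCAPINTS]   = { 0x0183f3ff, 0x00000000 };
		int i;

		/* A secondary CPU ANDs its flags into the accumulated set. */
		for (i = 0; i < NCAPINTS; i++)
			boot_caps[i] &= ap_caps[i];

		/* prints: 0183f3ff, then 00000000 */
		for (i = 0; i < NCAPINTS; i++)
			printf("common caps[%d] = %08lx\n", i, boot_caps[i]);
		return 0;
	}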
+/*
+ * Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
+ */
+
+void __init dodgy_tsc(void)
+{
+ if (( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX ) ||
+ ( boot_cpu_data.x86_vendor == X86_VENDOR_NSC ))
+ cpu_devs[X86_VENDOR_CYRIX]->c_init(&boot_cpu_data);
+}
+
+void __init print_cpu_info(struct cpuinfo_x86 *c)
+{
+ char *vendor = NULL;
+
+ if (c->x86_vendor < X86_VENDOR_NUM)
+ vendor = this_cpu->c_vendor;
+ else if (c->cpuid_level >= 0)
+ vendor = c->x86_vendor_id;
+
+ if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
+ printk("%s ", vendor);
+
+ if (!c->x86_model_id[0])
+ printk("%d86", c->x86);
+ else
+ printk("%s", c->x86_model_id);
+
+ if (c->x86_mask || c->cpuid_level >= 0)
+ printk(" stepping %02x\n", c->x86_mask);
+ else
+ printk("\n");
+}
+
+unsigned long cpu_initialized __initdata = 0;
+
+/* This is hacky. :)
+ * We're emulating future behavior.
+ * In the future, the cpu-specific init functions will be called implicitly
+ * via the magic of initcalls.
+ * They will insert themselves into the cpu_devs structure.
+ * Then, when cpu_init() is called, we can just iterate over that array.
+ */
+
+extern int intel_cpu_init(void);
+extern int cyrix_init_cpu(void);
+extern int nsc_init_cpu(void);
+extern int amd_init_cpu(void);
+extern int centaur_init_cpu(void);
+extern int transmeta_init_cpu(void);
+extern int rise_init_cpu(void);
+extern int nexgen_init_cpu(void);
+extern int umc_init_cpu(void);
+void early_cpu_detect(void);
+
+void __init early_cpu_init(void)
+{
+ intel_cpu_init();
+ cyrix_init_cpu();
+ nsc_init_cpu();
+ amd_init_cpu();
+ centaur_init_cpu();
+ transmeta_init_cpu();
+ rise_init_cpu();
+ nexgen_init_cpu();
+ umc_init_cpu();
+ early_cpu_detect();
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ /* pse is not compatible with on-the-fly unmapping,
+ * disable it even if the cpus claim to support it.
+ */
+ clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
+ disable_pse = 1;
+#endif
+}
+
+void __init cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
+{
+ unsigned long frames[gdt_descr->size >> PAGE_SHIFT];
+ unsigned long va;
+ int f;
+
+ for (va = gdt_descr->address, f = 0;
+ va < gdt_descr->address + gdt_descr->size;
+ va += PAGE_SIZE, f++) {
+ frames[f] = virt_to_machine(va) >> PAGE_SHIFT;
++ make_page_readonly((void *)va);
+ }
+ flush_page_update_queue();
+ if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
+ BUG();
+ lgdt_finish();
+}
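Under Xen the GDT cannot simply be loaded with lgdt: cpu_gdt_init() converts each virtual page of the table to a machine frame number, makes it read-only, and hands the frame list to the hypervisor, which also wants the length in 8-byte descriptors. A stand-alone sketch of that arithmetic with made-up addresses (virt_to_machine is modeled as a fixed offset here, purely for illustration):

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE (1UL << PAGE_SHIFT)

	/* stand-in for virt_to_machine(); the offset is a made-up example */
	static unsigned long virt_to_machine_sim(unsigned long va)
	{
		return va + 0x1000000;
	}

	int main(void)
	{
		unsigned long address = 0xc0100000UL, size = 2 * PAGE_SIZE;
		unsigned long frames[2], va;
		int f = 0;

		for (va = address; va < address + size; va += PAGE_SIZE, f++)
			frames[f] = virt_to_machine_sim(va) >> PAGE_SHIFT;

		/* prints: frames c1100 c1101, 1024 descriptors */
		printf("frames %lx %lx, %lu descriptors\n",
		       frames[0], frames[1], size / 8);
		return 0;
	}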
+
+/*
+ * cpu_init() initializes state that is per-CPU. Some data is already
+ * initialized (naturally) in the bootstrap process, such as the GDT
+ * and IDT. We reload them nevertheless, this function acts as a
+ * 'CPU state barrier', nothing should get across.
+ */
+void __init cpu_init (void)
+{
+ int cpu = smp_processor_id();
+ struct tss_struct * t = &per_cpu(init_tss, cpu);
+	struct thread_struct *thread = &current->thread;
+
+ if (test_and_set_bit(cpu, &cpu_initialized)) {
+ printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
+ for (;;) local_irq_enable();
+ }
+ printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+
+ if (cpu_has_vme || cpu_has_de)
+ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+ if (tsc_disable && cpu_has_tsc) {
+ printk(KERN_NOTICE "Disabling TSC...\n");
+ /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
+ clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
+ set_in_cr4(X86_CR4_TSD);
+ }
+
+ /*
+ * Initialize the per-CPU GDT with the boot GDT,
+ * and set up the GDT descriptor:
+ */
+ if (cpu) {
+ cpu_gdt_descr[cpu].size = GDT_SIZE;
+ cpu_gdt_descr[cpu].address = 0; /* XXXcl alloc page */
+ BUG(); /* XXXcl SMP */
+ memcpy((void *)cpu_gdt_descr[cpu].address,
+ (void *)cpu_gdt_descr[0].address, GDT_SIZE);
+ }
+ /*
+ * Set up the per-thread TLS descriptor cache:
+ */
+ memcpy(thread->tls_array, &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN],
+ GDT_ENTRY_TLS_ENTRIES * 8);
+
+ cpu_gdt_init(&cpu_gdt_descr[cpu]);
+
+ /*
+ * Delete NT
+ */
+ __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
+
+ /*
+ * Set up and load the per-CPU TSS and LDT
+ */
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
+ if (current->mm)
+ BUG();
+ enter_lazy_tlb(&init_mm, current);
+
+ load_esp0(t, thread);
+
+ load_LDT(&init_mm.context);
+ flush_page_update_queue();
+
+ /* Clear %fs and %gs. */
+ asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
+
+ /* Clear all 6 debug registers: */
+
+#define CD(register) HYPERVISOR_set_debugreg(register, 0)
+
+ CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
+
+#undef CD
+
+ /*
+ * Force FPU initialization:
+ */
+ current_thread_info()->status = 0;
+ current->used_math = 0;
+ mxcsr_feature_mask_init();
+}
--- /dev/null
- unsigned long pfn, i;
+/*
+ * Dynamic DMA mapping support.
+ *
+ * On i386 there is no hardware dynamic DMA address translation,
+ * so consistent alloc/free are merely page allocation/freeing.
+ * The rest of the dynamic DMA mapping interface is implemented
+ * in asm/pci.h.
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/version.h>
+#include <asm/io.h>
++#include <asm-xen/balloon.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define pte_offset_kernel pte_offset
+#endif
+
+struct dma_coherent_mem {
+ void *virt_base;
+ u32 device_base;
+ int size;
+ int flags;
+ unsigned long *bitmap;
+};
+
+static void
+xen_contig_memory(unsigned long vstart, unsigned int order)
+{
+ /*
+ * Ensure multi-page extents are contiguous in machine memory.
+ * This code could be cleaned up some, and the number of
+ * hypercalls reduced.
+ */
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
++ unsigned long pfn, i, flags;
+
+ scrub_pages(vstart, 1 << order);
++
++ balloon_lock(flags);
++
+ /* 1. Zap current PTEs, giving away the underlying pages. */
+ for (i = 0; i < (1<<order); i++) {
+ pgd = pgd_offset_k( (vstart + (i*PAGE_SIZE)));
+ pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
+ pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+ pfn = pte->pte_low >> PAGE_SHIFT;
+ queue_l1_entry_update(pte, 0);
+ phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+ INVALID_P2M_ENTRY;
+ flush_page_update_queue();
+ if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
+ &pfn, 1, 0) != 1) BUG();
+ }
+ /* 2. Get a new contiguous memory extent. */
+ if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
+ &pfn, 1, order) != 1) BUG();
+ /* 3. Map the new extent in place of old pages. */
+ for (i = 0; i < (1<<order); i++) {
+ pgd = pgd_offset_k( (vstart + (i*PAGE_SIZE)));
+ pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
+ pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+ queue_l1_entry_update(
+ pte, ((pfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL);
+ queue_machphys_update(
+ pfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
+ phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+ pfn+i;
+ }
+ /* Flush updates through and flush the TLB. */
+ xen_tlb_flush();
++
++ balloon_unlock(flags);
+}
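xen_contig_memory() works in three steps: it zaps the PTEs and returns each scattered machine frame to Xen (decrease_reservation), asks Xen for one machine-contiguous extent of the requested order (increase_reservation), then maps that extent back at the same virtual range while repairing both directions of the pfn/mfn translation. A stand-alone sketch (illustration only) simulating just the phys-to-machine table bookkeeping for an order-1 extent, with made-up frame numbers:

	#include <stdio.h>

	#define INVALID_P2M_ENTRY (~0UL)

	int main(void)
	{
		unsigned long p2m[4] = { 100, 205, 117, 42 };	/* scattered mfns */
		unsigned long base_pfn = 1;	/* extent starts at pseudo-phys page 1 */
		unsigned long new_mfn = 300;	/* contiguous extent granted by Xen */
		int order = 1, i;

		/* 1. Invalidate the entries while the old frames go back to Xen. */
		for (i = 0; i < (1 << order); i++)
			p2m[base_pfn + i] = INVALID_P2M_ENTRY;

		/* 3. Point the same pfns at the new machine-contiguous frames. */
		for (i = 0; i < (1 << order); i++)
			p2m[base_pfn + i] = new_mfn + i;

		/* prints: 100, 300, 301, 42 */
		for (i = 0; i < 4; i++)
			printf("pfn %d -> mfn %lu\n", i, p2m[i]);
		return 0;
	}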
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle)
+#else
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, int gfp)
+#endif
+{
+ void *ret;
+ unsigned int order = get_order(size);
+ unsigned long vstart;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ int gfp = GFP_ATOMIC;
+
+ if (hwdev == NULL || ((u32)hwdev->dma_mask < 0xffffffff))
+ gfp |= GFP_DMA;
+#else
+ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+ /* ignore region specifiers */
+ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+
+ if (mem) {
+ int page = bitmap_find_free_region(mem->bitmap, mem->size,
+ order);
+ if (page >= 0) {
+ *dma_handle = mem->device_base + (page << PAGE_SHIFT);
+ ret = mem->virt_base + (page << PAGE_SHIFT);
+ memset(ret, 0, size);
+ return ret;
+ }
+ if (mem->flags & DMA_MEMORY_EXCLUSIVE)
+ return NULL;
+ }
+
+ if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
+ gfp |= GFP_DMA;
+#endif
+
+ vstart = __get_free_pages(gfp, order);
+ ret = (void *)vstart;
+ if (ret == NULL)
+ return ret;
+
+ xen_contig_memory(vstart, order);
+
+ memset(ret, 0, size);
+ *dma_handle = virt_to_bus(ret);
+
+ return ret;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ free_pages((unsigned long)vaddr, get_order(size));
+}
+#else
+
+void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+ int order = get_order(size);
+
+ if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+ int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+
+ bitmap_release_region(mem->bitmap, page, order);
+ } else
+ free_pages((unsigned long)vaddr, order);
+}
+
+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+ dma_addr_t device_addr, size_t size, int flags)
+{
+ void __iomem *mem_base;
+ int pages = size >> PAGE_SHIFT;
+ int bitmap_size = (pages + 31)/32;
+
+ if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
+ goto out;
+ if (!size)
+ goto out;
+ if (dev->dma_mem)
+ goto out;
+
+ /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
+
+ mem_base = ioremap(bus_addr, size);
+ if (!mem_base)
+ goto out;
+
+	dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+ if (!dev->dma_mem)
+ goto out;
+ memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
+	dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
+ if (!dev->dma_mem->bitmap)
+ goto free1_out;
+ memset(dev->dma_mem->bitmap, 0, bitmap_size);
+
+ dev->dma_mem->virt_base = mem_base;
+ dev->dma_mem->device_base = device_addr;
+ dev->dma_mem->size = pages;
+ dev->dma_mem->flags = flags;
+
+ if (flags & DMA_MEMORY_MAP)
+ return DMA_MEMORY_MAP;
+
+ return DMA_MEMORY_IO;
+
+ free1_out:
+ kfree(dev->dma_mem->bitmap);
+ out:
+ return 0;
+}
+EXPORT_SYMBOL(dma_declare_coherent_memory);
+
+void dma_release_declared_memory(struct device *dev)
+{
+ struct dma_coherent_mem *mem = dev->dma_mem;
+
+ if(!mem)
+ return;
+ dev->dma_mem = NULL;
+ kfree(mem->bitmap);
+ kfree(mem);
+}
+EXPORT_SYMBOL(dma_release_declared_memory);
+
+void *dma_mark_declared_memory_occupied(struct device *dev,
+ dma_addr_t device_addr, size_t size)
+{
+ struct dma_coherent_mem *mem = dev->dma_mem;
+ int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ int pos, err;
+
+ if (!mem)
+ return ERR_PTR(-EINVAL);
+
+ pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
+ err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
+ if (err != 0)
+ return ERR_PTR(err);
+ return mem->virt_base + (pos << PAGE_SHIFT);
+}
+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
+
+#endif
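For context, a hypothetical caller of the declared-memory interface above: a driver with a small device-local buffer declares it once at probe time, after which dma_alloc_coherent() is satisfied from that pool's bitmap instead of the page allocator. The bus address, device address, and size below are illustrative assumptions, not real hardware:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	static int example_probe(struct device *dev)
	{
		dma_addr_t handle;
		void *buf;

		/* 64KB of device-local memory at a made-up bus address */
		if (dma_declare_coherent_memory(dev, 0xfe000000, 0, 0x10000,
				DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE)
		    != DMA_MEMORY_MAP)
			return -ENOMEM;

		/* now served from the declared pool via the bitmap allocator */
		buf = dma_alloc_coherent(dev, 4096, &handle, GFP_KERNEL);
		if (!buf) {
			dma_release_declared_memory(dev);
			return -ENOMEM;
		}
		/* ... program the device with 'handle', use 'buf' ... */
		return 0;
	}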
--- /dev/null
-
+/*
+ * linux/arch/i386/kernel/setup.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ *
+ * Memory region support
+ * David Parsons <orc@pell.chi.il.us>, July-August 1999
+ *
+ * Added E820 sanitization routine (removes overlapping memory regions);
+ * Brian Moyle <bmoyle@mvista.com>, February 2001
+ *
+ * Moved CPU detection code to cpu/${cpu}.c
+ * Patrick Mochel <mochel@osdl.org>, March 2002
+ *
+ * Provisions for empty E820 memory regions (reported by certain BIOSes).
+ * Alex Achenbach <xela@slit.de>, December 2002.
+ *
+ */
+
+/*
+ * This file handles the architecture-dependent parts of initialization
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/acpi.h>
+#include <linux/apm_bios.h>
+#include <linux/initrd.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/console.h>
+#include <linux/root_dev.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/edd.h>
+#include <video/edid.h>
+#include <asm/e820.h>
+#include <asm/mpspec.h>
+#include <asm/setup.h>
+#include <asm/arch_hooks.h>
+#include <asm/sections.h>
+#include <asm/io_apic.h>
+#include <asm/ist.h>
+#include <asm/io.h>
+#include <asm-xen/hypervisor.h>
+#include "setup_arch_pre.h"
+#include <bios_ebda.h>
+
++/* Allows setting of maximum possible memory size */
++static unsigned long xen_override_max_pfn;
++
+int disable_pse __initdata = 0;
+
+/*
+ * Machine setup..
+ */
+
+#ifdef CONFIG_EFI
+int efi_enabled = 0;
+EXPORT_SYMBOL(efi_enabled);
+#endif
+
+/* cpu data as detected by the assembly code in head.S */
+struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 0, 1, 0, -1 };
+/* common cpu data for all cpus */
+struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 0, 1, 0, -1 };
+
+unsigned long mmu_cr4_features;
+EXPORT_SYMBOL_GPL(mmu_cr4_features);
+
+#ifdef CONFIG_ACPI_INTERPRETER
+ int acpi_disabled = 0;
+#else
+ int acpi_disabled = 1;
+#endif
+EXPORT_SYMBOL(acpi_disabled);
+
+#ifdef CONFIG_ACPI_BOOT
+int __initdata acpi_force = 0;
+extern acpi_interrupt_flags acpi_sci_flags;
+#endif
+
+int MCA_bus;
+/* for MCA, but anyone else can use it if they want */
+unsigned int machine_id;
+unsigned int machine_submodel_id;
+unsigned int BIOS_revision;
+unsigned int mca_pentium_flag;
+
+/* For PCI or other memory-mapped resources */
+unsigned long pci_mem_start = 0x10000000;
+
+/* user-defined highmem size */
+static unsigned int highmem_pages = -1;
+
+/*
+ * Setup options
+ */
+struct drive_info_struct { char dummy[32]; } drive_info;
+struct screen_info screen_info;
+struct apm_info apm_info;
+struct sys_desc_table_struct {
+ unsigned short length;
+ unsigned char table[0];
+};
+struct edid_info edid_info;
+struct ist_info ist_info;
+struct e820map e820;
+
+unsigned char aux_device_present;
+
+extern void early_cpu_init(void);
+extern void dmi_scan_machine(void);
+extern void generic_apic_probe(char *);
+extern int root_mountflags;
+
+unsigned long saved_videomode;
+
+#define RAMDISK_IMAGE_START_MASK 0x07FF
+#define RAMDISK_PROMPT_FLAG 0x8000
+#define RAMDISK_LOAD_FLAG 0x4000
+
+static char command_line[COMMAND_LINE_SIZE];
+
+unsigned char __initdata boot_params[PARAM_SIZE];
+
+static struct resource data_resource = {
+ .name = "Kernel data",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+static struct resource code_resource = {
+ .name = "Kernel code",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+static struct resource system_rom_resource = {
+ .name = "System ROM",
+ .start = 0xf0000,
+ .end = 0xfffff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+};
+
+static struct resource extension_rom_resource = {
+ .name = "Extension ROM",
+ .start = 0xe0000,
+ .end = 0xeffff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+};
+
+static struct resource adapter_rom_resources[] = { {
+ .name = "Adapter ROM",
+ .start = 0xc8000,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+ .name = "Adapter ROM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+ .name = "Adapter ROM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+ .name = "Adapter ROM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+ .name = "Adapter ROM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+ .name = "Adapter ROM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+} };
+
+#define ADAPTER_ROM_RESOURCES \
+ (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
+
+static struct resource video_rom_resource = {
+ .name = "Video ROM",
+ .start = 0xc0000,
+ .end = 0xc7fff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+};
+#endif
+
+static struct resource video_ram_resource = {
+ .name = "Video RAM area",
+ .start = 0xa0000,
+ .end = 0xbffff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+static struct resource standard_io_resources[] = { {
+ .name = "dma1",
+ .start = 0x0000,
+ .end = 0x001f,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "pic1",
+ .start = 0x0020,
+ .end = 0x0021,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "timer0",
+ .start = 0x0040,
+ .end = 0x0043,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "timer1",
+ .start = 0x0050,
+ .end = 0x0053,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "keyboard",
+ .start = 0x0060,
+ .end = 0x006f,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "dma page reg",
+ .start = 0x0080,
+ .end = 0x008f,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "pic2",
+ .start = 0x00a0,
+ .end = 0x00a1,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "dma2",
+ .start = 0x00c0,
+ .end = 0x00df,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "fpu",
+ .start = 0x00f0,
+ .end = 0x00ff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+} };
+
+#define STANDARD_IO_RESOURCES \
+ (sizeof standard_io_resources / sizeof standard_io_resources[0])
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
+
+static int __init romchecksum(unsigned char *rom, unsigned long length)
+{
+ unsigned char *p, sum = 0;
+
+ for (p = rom; p < rom + length; p++)
+ sum += *p;
+ return sum == 0;
+}
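The expansion-ROM convention: an image is accepted when its bytes sum to zero modulo 256, which is what romchecksum() tests with an 8-bit accumulator. A tiny stand-alone sketch with a made-up three-byte image whose bytes already sum to 0x100:

	#include <stdio.h>

	int main(void)
	{
		/* 0x55 + 0xaa + 0x01 = 0x100, i.e. 0 in 8-bit arithmetic */
		unsigned char rom[] = { 0x55, 0xaa, 0x01 };
		unsigned char sum = 0;
		unsigned int i;

		for (i = 0; i < sizeof(rom); i++)
			sum += rom[i];
		printf("checksum %s\n", sum == 0 ? "ok" : "bad");	/* ok */
		return 0;
	}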
+
+static void __init probe_roms(void)
+{
+ unsigned long start, length, upper;
+ unsigned char *rom;
+ int i;
+
+ /* video rom */
+ upper = adapter_rom_resources[0].start;
+ for (start = video_rom_resource.start; start < upper; start += 2048) {
+ rom = isa_bus_to_virt(start);
+ if (!romsignature(rom))
+ continue;
+
+ video_rom_resource.start = start;
+
+ /* 0 < length <= 0x7f * 512, historically */
+ length = rom[2] * 512;
+
+ /* if checksum okay, trust length byte */
+ if (length && romchecksum(rom, length))
+ video_rom_resource.end = start + length - 1;
+
+ request_resource(&iomem_resource, &video_rom_resource);
+ break;
+ }
+
+ start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
+ if (start < upper)
+ start = upper;
+
+ /* system rom */
+ request_resource(&iomem_resource, &system_rom_resource);
+ upper = system_rom_resource.start;
+
+ /* check for extension rom (ignore length byte!) */
+ rom = isa_bus_to_virt(extension_rom_resource.start);
+ if (romsignature(rom)) {
+ length = extension_rom_resource.end - extension_rom_resource.start + 1;
+ if (romchecksum(rom, length)) {
+ request_resource(&iomem_resource, &extension_rom_resource);
+ upper = extension_rom_resource.start;
+ }
+ }
+
+ /* check for adapter roms on 2k boundaries */
+ for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
+ rom = isa_bus_to_virt(start);
+ if (!romsignature(rom))
+ continue;
+
+ /* 0 < length <= 0x7f * 512, historically */
+ length = rom[2] * 512;
+
+ /* but accept any length that fits if checksum okay */
+ if (!length || start + length > upper || !romchecksum(rom, length))
+ continue;
+
+ adapter_rom_resources[i].start = start;
+ adapter_rom_resources[i].end = start + length - 1;
+ request_resource(&iomem_resource, &adapter_rom_resources[i]);
+
+ start = adapter_rom_resources[i++].end & ~2047UL;
+ }
+}
+#endif
+
+/*
+ * Point at the empty zero page to start with. We map the real shared_info
+ * page as soon as fixmap is up and running.
+ */
+shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
+EXPORT_SYMBOL(HYPERVISOR_shared_info);
+
+unsigned long *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
+EXPORT_SYMBOL(phys_to_machine_mapping);
+
+multicall_entry_t multicall_list[8];
+int nr_multicall_ents = 0;
+
+/* Raw start-of-day parameters from the hypervisor. */
+union xen_start_info_union xen_start_info_union;
+
+static void __init limit_regions(unsigned long long size)
+{
+ unsigned long long current_addr = 0;
+ int i;
+
+ if (efi_enabled) {
+ for (i = 0; i < memmap.nr_map; i++) {
+ current_addr = memmap.map[i].phys_addr +
+ (memmap.map[i].num_pages << 12);
+ if (memmap.map[i].type == EFI_CONVENTIONAL_MEMORY) {
+ if (current_addr >= size) {
+ memmap.map[i].num_pages -=
+ (((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
+ memmap.nr_map = i + 1;
+ return;
+ }
+ }
+ }
+ }
+ for (i = 0; i < e820.nr_map; i++) {
+ if (e820.map[i].type == E820_RAM) {
+ current_addr = e820.map[i].addr + e820.map[i].size;
+ if (current_addr >= size) {
+ e820.map[i].size -= current_addr-size;
+ e820.nr_map = i + 1;
+ return;
+ }
+ }
+ }
+}
+
+static void __init add_memory_region(unsigned long long start,
+ unsigned long long size, int type)
+{
+ int x;
+
+ if (!efi_enabled) {
+ x = e820.nr_map;
+
+ if (x == E820MAX) {
+ printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
+ return;
+ }
+
+ e820.map[x].addr = start;
+ e820.map[x].size = size;
+ e820.map[x].type = type;
+ e820.nr_map++;
+ }
+} /* add_memory_region */
+
+#define E820_DEBUG 1
+
+static void __init print_memory_map(char *who)
+{
+ int i;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ printk(" %s: %016Lx - %016Lx ", who,
+ e820.map[i].addr,
+ e820.map[i].addr + e820.map[i].size);
+ switch (e820.map[i].type) {
+ case E820_RAM: printk("(usable)\n");
+ break;
+ case E820_RESERVED:
+ printk("(reserved)\n");
+ break;
+ case E820_ACPI:
+ printk("(ACPI data)\n");
+ break;
+ case E820_NVS:
+ printk("(ACPI NVS)\n");
+ break;
+ default: printk("type %lu\n", e820.map[i].type);
+ break;
+ }
+ }
+}
+
+#if 0
+/*
+ * Sanitize the BIOS e820 map.
+ *
+ * Some e820 responses include overlapping entries. The following
+ * replaces the original e820 map with a new one, removing overlaps.
+ *
+ */
+struct change_member {
+ struct e820entry *pbios; /* pointer to original bios entry */
+ unsigned long long addr; /* address for this change point */
+};
+struct change_member change_point_list[2*E820MAX] __initdata;
+struct change_member *change_point[2*E820MAX] __initdata;
+struct e820entry *overlap_list[E820MAX] __initdata;
+struct e820entry new_bios[E820MAX] __initdata;
+
+static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
+{
+ struct change_member *change_tmp;
+ unsigned long current_type, last_type;
+ unsigned long long last_addr;
+ int chgidx, still_changing;
+ int overlap_entries;
+ int new_bios_entry;
+ int old_nr, new_nr, chg_nr;
+ int i;
+
+ /*
+ Visually we're performing the following (1,2,3,4 = memory types)...
+
+ Sample memory map (w/overlaps):
+ ____22__________________
+ ______________________4_
+ ____1111________________
+ _44_____________________
+ 11111111________________
+ ____________________33__
+ ___________44___________
+ __________33333_________
+ ______________22________
+ ___________________2222_
+ _________111111111______
+ _____________________11_
+ _________________4______
+
+ Sanitized equivalent (no overlap):
+ 1_______________________
+ _44_____________________
+ ___1____________________
+ ____22__________________
+ ______11________________
+ _________1______________
+ __________3_____________
+ ___________44___________
+ _____________33_________
+ _______________2________
+ ________________1_______
+ _________________4______
+ ___________________2____
+ ____________________33__
+ ______________________4_
+ */
+
+ /* if there's only one memory region, don't bother */
+ if (*pnr_map < 2)
+ return -1;
+
+ old_nr = *pnr_map;
+
+ /* bail out if we find any unreasonable addresses in bios map */
+ for (i=0; i<old_nr; i++)
+ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
+ return -1;
+
+ /* create pointers for initial change-point information (for sorting) */
+ for (i=0; i < 2*old_nr; i++)
+ change_point[i] = &change_point_list[i];
+
+ /* record all known change-points (starting and ending addresses),
+ omitting those that are for empty memory regions */
+ chgidx = 0;
+ for (i=0; i < old_nr; i++) {
+ if (biosmap[i].size != 0) {
+ change_point[chgidx]->addr = biosmap[i].addr;
+ change_point[chgidx++]->pbios = &biosmap[i];
+ change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
+ change_point[chgidx++]->pbios = &biosmap[i];
+ }
+ }
+ chg_nr = chgidx; /* true number of change-points */
+
+ /* sort change-point list by memory addresses (low -> high) */
+ still_changing = 1;
+ while (still_changing) {
+ still_changing = 0;
+ for (i=1; i < chg_nr; i++) {
+ /* if <current_addr> > <last_addr>, swap */
+ /* or, if current=<start_addr> & last=<end_addr>, swap */
+ if ((change_point[i]->addr < change_point[i-1]->addr) ||
+ ((change_point[i]->addr == change_point[i-1]->addr) &&
+ (change_point[i]->addr == change_point[i]->pbios->addr) &&
+ (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
+ )
+ {
+ change_tmp = change_point[i];
+ change_point[i] = change_point[i-1];
+ change_point[i-1] = change_tmp;
+ still_changing=1;
+ }
+ }
+ }
+
+ /* create a new bios memory map, removing overlaps */
+ overlap_entries=0; /* number of entries in the overlap table */
+ new_bios_entry=0; /* index for creating new bios map entries */
+ last_type = 0; /* start with undefined memory type */
+ last_addr = 0; /* start with 0 as last starting address */
+	/* loop through change-points, determining effect on the new bios map */
+ for (chgidx=0; chgidx < chg_nr; chgidx++)
+ {
+ /* keep track of all overlapping bios entries */
+ if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
+ {
+ /* add map entry to overlap list (> 1 entry implies an overlap) */
+ overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
+ }
+ else
+ {
+ /* remove entry from list (order independent, so swap with last) */
+ for (i=0; i<overlap_entries; i++)
+ {
+ if (overlap_list[i] == change_point[chgidx]->pbios)
+ overlap_list[i] = overlap_list[overlap_entries-1];
+ }
+ overlap_entries--;
+ }
+ /* if there are overlapping entries, decide which "type" to use */
+ /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
+ current_type = 0;
+ for (i=0; i<overlap_entries; i++)
+ if (overlap_list[i]->type > current_type)
+ current_type = overlap_list[i]->type;
+ /* continue building up new bios map based on this information */
+ if (current_type != last_type) {
+ if (last_type != 0) {
+ new_bios[new_bios_entry].size =
+ change_point[chgidx]->addr - last_addr;
+ /* move forward only if the new size was non-zero */
+ if (new_bios[new_bios_entry].size != 0)
+ if (++new_bios_entry >= E820MAX)
+ break; /* no more space left for new bios entries */
+ }
+ if (current_type != 0) {
+ new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
+ new_bios[new_bios_entry].type = current_type;
+ last_addr=change_point[chgidx]->addr;
+ }
+ last_type = current_type;
+ }
+ }
+ new_nr = new_bios_entry; /* retain count for new bios entries */
+
+ /* copy new bios mapping into original location */
+ memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
+ *pnr_map = new_nr;
+
+ return 0;
+}
+
+/*
+ * Copy the BIOS e820 map into a safe place.
+ *
+ * Sanity-check it while we're at it..
+ *
+ * If we're lucky and live on a modern system, the setup code
+ * will have given us a memory map that we can use to properly
+ * set up memory. If we aren't, we'll fake a memory map.
+ *
+ * We check to see that the memory map contains at least 2 elements
+ * before we'll use it, because the detection code in setup.S may
+ * not be perfect and most every PC known to man has two memory
+ * regions: one from 0 to 640k, and one from 1mb up. (The IBM
+ * thinkpad 560x, for example, does not cooperate with the memory
+ * detection code.)
+ */
+static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
+{
+ /* Only one memory region (or negative)? Ignore it */
+ if (nr_map < 2)
+ return -1;
+
+ do {
+ unsigned long long start = biosmap->addr;
+ unsigned long long size = biosmap->size;
+ unsigned long long end = start + size;
+ unsigned long type = biosmap->type;
+
+ /* Overflow in 64 bits? Ignore the memory map. */
+ if (start > end)
+ return -1;
+
+ /*
+ * Some BIOSes claim RAM in the 640k - 1M region.
+ * Not right. Fix it up.
+ */
+ if (type == E820_RAM) {
+ if (start < 0x100000ULL && end > 0xA0000ULL) {
+ if (start < 0xA0000ULL)
+ add_memory_region(start, 0xA0000ULL-start, type);
+ if (end <= 0x100000ULL)
+ continue;
+ start = 0x100000ULL;
+ size = end - start;
+ }
+ }
+ add_memory_region(start, size, type);
+ } while (biosmap++,--nr_map);
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
+struct edd edd;
+#ifdef CONFIG_EDD_MODULE
+EXPORT_SYMBOL(edd);
+#endif
+/**
+ * copy_edd() - Copy the BIOS EDD information
+ * from boot_params into a safe place.
+ *
+ */
+static inline void copy_edd(void)
+{
+ memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
+ memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
+ edd.mbr_signature_nr = EDD_MBR_SIG_NR;
+ edd.edd_info_nr = EDD_NR;
+}
+#else
+static inline void copy_edd(void)
+{
+}
+#endif
+
+/*
+ * Do NOT EVER look at the BIOS memory size location.
+ * It does not work on many machines.
+ */
+#define LOWMEMSIZE() (0x9f000)
+
+static void __init parse_cmdline_early (char ** cmdline_p)
+{
+ char c = ' ', *to = command_line, *from = saved_command_line;
+ int len = 0;
+ int userdef = 0;
+
+ memcpy(saved_command_line, xen_start_info.cmd_line, MAX_CMDLINE);
+ /* Save unparsed command line copy for /proc/cmdline */
+ saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+
+ for (;;) {
+ /*
+ * "mem=nopentium" disables the 4MB page tables.
+ * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
+ * to <mem>, overriding the bios size.
+ * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
+ * <start> to <start>+<mem>, overriding the bios size.
+ *
+ * HPA tells me bootloaders need to parse mem=, so no new
+ * option should be mem= [also see Documentation/i386/boot.txt]
+ */
+ if (c == ' ' && !memcmp(from, "mem=", 4)) {
+ if (to != command_line)
+ to--;
+ if (!memcmp(from+4, "nopentium", 9)) {
+ from += 9+4;
+ clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
+ disable_pse = 1;
+ } else {
+ /* If the user specifies memory size, we
+ * limit the BIOS-provided memory map to
+ * that size. exactmap can be used to specify
+ * the exact map. mem=number can be used to
+ * trim the existing memory map.
+ */
+ unsigned long long mem_size;
+
+ mem_size = memparse(from+4, &from);
++#if 0
+ limit_regions(mem_size);
+ userdef=1;
++#else
++ xen_override_max_pfn =
++ (unsigned long)(mem_size>>PAGE_SHIFT);
++#endif
+ }
+ }
+
+ if (c == ' ' && !memcmp(from, "memmap=", 7)) {
+ if (to != command_line)
+ to--;
+ if (!memcmp(from+7, "exactmap", 8)) {
+ from += 8+7;
+ e820.nr_map = 0;
+ userdef = 1;
+ } else {
+ /* If the user specifies memory size, we
+ * limit the BIOS-provided memory map to
+ * that size. exactmap can be used to specify
+ * the exact map. mem=number can be used to
+ * trim the existing memory map.
+ */
+ unsigned long long start_at, mem_size;
+
+ mem_size = memparse(from+7, &from);
+ if (*from == '@') {
+ start_at = memparse(from+1, &from);
+ add_memory_region(start_at, mem_size, E820_RAM);
+ } else if (*from == '#') {
+ start_at = memparse(from+1, &from);
+ add_memory_region(start_at, mem_size, E820_ACPI);
+ } else if (*from == '$') {
+ start_at = memparse(from+1, &from);
+ add_memory_region(start_at, mem_size, E820_RESERVED);
+ } else {
+ limit_regions(mem_size);
+ userdef=1;
+ }
+ }
+ }
+
+#ifdef CONFIG_X86_SMP
+ /*
+ * If the BIOS enumerates physical processors before logical,
+ * maxcpus=N at enumeration-time can be used to disable HT.
+ */
+ else if (!memcmp(from, "maxcpus=", 8)) {
+ extern unsigned int maxcpus;
+
+ maxcpus = simple_strtoul(from + 8, NULL, 0);
+ }
+#endif
+
+#ifdef CONFIG_ACPI_BOOT
+ /* "acpi=off" disables both ACPI table parsing and interpreter */
+ else if (!memcmp(from, "acpi=off", 8)) {
+ disable_acpi();
+ }
+
+ /* acpi=force to over-ride black-list */
+ else if (!memcmp(from, "acpi=force", 10)) {
+ acpi_force = 1;
+ acpi_ht = 1;
+ acpi_disabled = 0;
+ }
+
+ /* acpi=strict disables out-of-spec workarounds */
+ else if (!memcmp(from, "acpi=strict", 11)) {
+ acpi_strict = 1;
+ }
+
+ /* Limit ACPI just to boot-time to enable HT */
+ else if (!memcmp(from, "acpi=ht", 7)) {
+ if (!acpi_force)
+ disable_acpi();
+ acpi_ht = 1;
+ }
+
+ /* "pci=noacpi" disable ACPI IRQ routing and PCI scan */
+ else if (!memcmp(from, "pci=noacpi", 10)) {
+ acpi_disable_pci();
+ }
+ /* "acpi=noirq" disables ACPI interrupt routing */
+ else if (!memcmp(from, "acpi=noirq", 10)) {
+ acpi_noirq_set();
+ }
+
+ else if (!memcmp(from, "acpi_sci=edge", 13))
+ acpi_sci_flags.trigger = 1;
+
+ else if (!memcmp(from, "acpi_sci=level", 14))
+ acpi_sci_flags.trigger = 3;
+
+ else if (!memcmp(from, "acpi_sci=high", 13))
+ acpi_sci_flags.polarity = 1;
+
+ else if (!memcmp(from, "acpi_sci=low", 12))
+ acpi_sci_flags.polarity = 3;
+
+#ifdef CONFIG_X86_IO_APIC
+ else if (!memcmp(from, "acpi_skip_timer_override", 24))
+ acpi_skip_timer_override = 1;
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+ /* disable IO-APIC */
+ else if (!memcmp(from, "noapic", 6))
+ disable_ioapic_setup();
+#endif /* CONFIG_X86_LOCAL_APIC */
+#endif /* CONFIG_ACPI_BOOT */
+
+ /*
+ * highmem=size forces highmem to be exactly 'size' bytes.
+ * This works even on boxes that have no highmem otherwise.
+ * This also works to reduce highmem size on bigger boxes.
+ */
+ if (c == ' ' && !memcmp(from, "highmem=", 8))
+ highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
+
+ /*
+ * vmalloc=size forces the vmalloc area to be exactly 'size'
+ * bytes. This can be used to increase (or decrease) the
+ * vmalloc area - the default is 128m.
+ */
+ if (c == ' ' && !memcmp(from, "vmalloc=", 8))
+ __VMALLOC_RESERVE = memparse(from+8, &from);
+
+ c = *(from++);
+ if (!c)
+ break;
+ if (COMMAND_LINE_SIZE <= ++len)
+ break;
+ *(to++) = c;
+ }
+ *to = '\0';
+ *cmdline_p = command_line;
+ if (userdef) {
+ printk(KERN_INFO "user-defined physical RAM map:\n");
+ print_memory_map("user");
+ }
+}
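Worth noting: in the Xen branch above, mem= no longer trims the e820 map; it only records a page-frame ceiling in xen_override_max_pfn, which find_max_pfn() below clamps against xen_start_info.nr_pages. A stand-alone sketch of the arithmetic (a simplified parser stands in for the kernel's memparse()):

	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_SHIFT 12	/* 4KB pages on i386 */

	/* simplified stand-in for memparse(): number plus optional K/M/G suffix */
	static unsigned long long memparse_simple(const char *s)
	{
		char *end;
		unsigned long long v = strtoull(s, &end, 0);

		switch (*end) {
		case 'G': case 'g': v <<= 10;	/* fall through */
		case 'M': case 'm': v <<= 10;	/* fall through */
		case 'K': case 'k': v <<= 10;
		}
		return v;
	}

	int main(void)
	{
		unsigned long long mem_size = memparse_simple("512M");
		unsigned long max_pfn = (unsigned long)(mem_size >> PAGE_SHIFT);

		/* prints: mem=512M -> 536870912 bytes -> max_pfn 131072 */
		printf("mem=512M -> %llu bytes -> max_pfn %lu\n", mem_size, max_pfn);
		return 0;
	}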
+
++#if 0 /* !XEN */
+/*
+ * Callback for efi_memory_walk.
+ */
+static int __init
+efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
+{
+ unsigned long *max_pfn = arg, pfn;
+
+ if (start < end) {
+ pfn = PFN_UP(end -1);
+ if (pfn > *max_pfn)
+ *max_pfn = pfn;
+ }
+ return 0;
+}
+
+/*
+ * Find the highest page frame number we have available
+ */
+void __init find_max_pfn(void)
+{
+ int i;
+
+ max_pfn = 0;
+ if (efi_enabled) {
+ efi_memmap_walk(efi_find_max_pfn, &max_pfn);
+ return;
+ }
+
+ for (i = 0; i < e820.nr_map; i++) {
+ unsigned long start, end;
+ /* RAM? */
+ if (e820.map[i].type != E820_RAM)
+ continue;
+ start = PFN_UP(e820.map[i].addr);
+ end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
+ if (start >= end)
+ continue;
+ if (end > max_pfn)
+ max_pfn = end;
+ }
+}
++#else
++/* We don't use the fake e820 because we need to respond to user override. */
++void __init find_max_pfn(void)
++{
++ if ( xen_override_max_pfn < xen_start_info.nr_pages )
++ xen_override_max_pfn = xen_start_info.nr_pages;
++ max_pfn = xen_override_max_pfn;
++}
++#endif /* XEN */
+
+/*
+ * Determine low and high memory ranges:
+ */
+unsigned long __init find_max_low_pfn(void)
+{
+ unsigned long max_low_pfn;
+
+ max_low_pfn = max_pfn;
+ if (max_low_pfn > MAXMEM_PFN) {
+ if (highmem_pages == -1)
+ highmem_pages = max_pfn - MAXMEM_PFN;
+ if (highmem_pages + MAXMEM_PFN < max_pfn)
+ max_pfn = MAXMEM_PFN + highmem_pages;
+ if (highmem_pages + MAXMEM_PFN > max_pfn) {
+ printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages));
+ highmem_pages = 0;
+ }
+ max_low_pfn = MAXMEM_PFN;
+#ifndef CONFIG_HIGHMEM
+ /* Maximum memory usable is what is directly addressable */
+ printk(KERN_WARNING "Warning only %ldMB will be used.\n",
+ MAXMEM>>20);
+ if (max_pfn > MAX_NONPAE_PFN)
+ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
+ else
+ printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
+ max_pfn = MAXMEM_PFN;
+#else /* !CONFIG_HIGHMEM */
+#ifndef CONFIG_X86_PAE
+ if (max_pfn > MAX_NONPAE_PFN) {
+ max_pfn = MAX_NONPAE_PFN;
+ printk(KERN_WARNING "Warning only 4GB will be used.\n");
+ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
+ }
+#endif /* !CONFIG_X86_PAE */
+#endif /* !CONFIG_HIGHMEM */
+ } else {
+ if (highmem_pages == -1)
+ highmem_pages = 0;
+#ifdef CONFIG_HIGHMEM
+ if (highmem_pages >= max_pfn) {
+ printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
+ highmem_pages = 0;
+ }
+ if (highmem_pages) {
+ if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
+ printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
+ highmem_pages = 0;
+ }
+ max_low_pfn -= highmem_pages;
+ }
+#else
+ if (highmem_pages)
+ printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
+#endif
+ }
+ return max_low_pfn;
+}
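A worked example of the split find_max_low_pfn() computes: on a 1GB box with the usual i386 layout, only about 896MB can be permanently mapped (an assumed MAXMEM_PFN of 229376 pages; the exact value depends on the vmalloc reserve), so the remaining 128MB becomes highmem:

	#include <stdio.h>

	int main(void)
	{
		unsigned long max_pfn = 262144;		/* 1GB in 4K pages */
		unsigned long maxmem_pfn = 229376;	/* assumed 896MB lowmem ceiling */
		unsigned long highmem_pages = 0;

		if (max_pfn > maxmem_pfn)
			highmem_pages = max_pfn - maxmem_pfn;

		/* prints: lowmem 896MB, highmem 128MB (pages_to_mb is >> 8) */
		printf("lowmem %luMB, highmem %luMB\n",
		       maxmem_pfn >> 8, highmem_pages >> 8);
		return 0;
	}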
+
+#ifndef CONFIG_DISCONTIGMEM
+
+/*
+ * Free all available memory for boot time allocation. Used
+ * as a callback function by efi_memory_walk()
+ */
+
+static int __init
+free_available_memory(unsigned long start, unsigned long end, void *arg)
+{
+ /* check max_low_pfn */
+ if (start >= ((max_low_pfn + 1) << PAGE_SHIFT))
+ return 0;
+ if (end >= ((max_low_pfn + 1) << PAGE_SHIFT))
+ end = (max_low_pfn + 1) << PAGE_SHIFT;
+ if (start < end)
+ free_bootmem(start, end - start);
+
+ return 0;
+}
+/*
+ * Register fully available low RAM pages with the bootmem allocator.
+ */
+static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
+{
+ int i;
+
+ if (efi_enabled) {
+ efi_memmap_walk(free_available_memory, NULL);
+ return;
+ }
+ for (i = 0; i < e820.nr_map; i++) {
+ unsigned long curr_pfn, last_pfn, size;
+ /*
+ * Reserve usable low memory
+ */
+ if (e820.map[i].type != E820_RAM)
+ continue;
+ /*
+ * We are rounding up the start address of usable memory:
+ */
+ curr_pfn = PFN_UP(e820.map[i].addr);
+ if (curr_pfn >= max_low_pfn)
+ continue;
+ /*
+ * ... and at the end of the usable range downwards:
+ */
+ last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
+
+ if (last_pfn > max_low_pfn)
+ last_pfn = max_low_pfn;
+
+ /*
+ * .. finally, did all the rounding and playing
+ * around just make the area go away?
+ */
+ if (last_pfn <= curr_pfn)
+ continue;
+
+ size = last_pfn - curr_pfn;
+ free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
+ }
+}
+
+/*
+ * workaround for Dell systems that neglect to reserve EBDA
+ */
+static void __init reserve_ebda_region(void)
+{
+ unsigned int addr;
+ addr = get_bios_ebda();
+ if (addr)
+ reserve_bootmem(addr, PAGE_SIZE);
+}
+
+static unsigned long __init setup_memory(void)
+{
+ unsigned long bootmap_size, start_pfn, max_low_pfn;
+
+ /*
+ * partially used pages are not usable - thus
+ * we are rounding upwards:
+ */
+ start_pfn = PFN_UP(__pa(xen_start_info.pt_base)) + xen_start_info.nr_pt_frames;
+
+ find_max_pfn();
+
+ max_low_pfn = find_max_low_pfn();
+
+#ifdef CONFIG_HIGHMEM
+ highstart_pfn = highend_pfn = max_pfn;
+ if (max_pfn > max_low_pfn) {
+ highstart_pfn = max_low_pfn;
+ }
+ printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
+ pages_to_mb(highend_pfn - highstart_pfn));
+#endif
+ printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
+ pages_to_mb(max_low_pfn));
+ /*
+ * Initialize the boot-time allocator (with low memory only):
+ */
+ bootmap_size = init_bootmem(start_pfn, max_low_pfn);
+
+ register_bootmem_low_pages(max_low_pfn);
+
+ /*
+ * Reserve the bootmem bitmap itself as well. We do this in two
+ * steps (first step was init_bootmem()) because this catches
+ * the (very unlikely) case of us accidentally initializing the
+ * bootmem allocator with an invalid RAM area.
+ */
+ reserve_bootmem(HIGH_MEMORY, (PFN_PHYS(start_pfn) +
+ bootmap_size + PAGE_SIZE-1) - (HIGH_MEMORY));
+
+ /* reserve EBDA region, it's a 4K region */
+ reserve_ebda_region();
+
+ /* could be an AMD 768MPX chipset. Reserve a page before VGA to prevent
+	   PCI prefetch into it (errata #56). Usually the page is reserved anyway,
+ unless you have no PS/2 mouse plugged in. */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86 == 6)
+ reserve_bootmem(0xa0000 - 4096, 4096);
+
+#ifdef CONFIG_SMP
+ /*
+ * But first pinch a few for the stack/trampoline stuff
+ * FIXME: Don't need the extra page at 4K, but need to fix
+ * trampoline before removing it. (see the GDT stuff)
+ */
+ reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
+#endif
+#ifdef CONFIG_ACPI_SLEEP
+ /*
+ * Reserve low memory region for sleep support.
+ */
+ acpi_reserve_bootmem();
+#endif
+#ifdef CONFIG_X86_FIND_SMP_CONFIG
+ /*
+ * Find and reserve possible boot-time SMP configuration:
+ */
+ find_smp_config();
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (xen_start_info.mod_start) {
+ if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
+ /*reserve_bootmem(INITRD_START, INITRD_SIZE);*/
+ initrd_start = INITRD_START + PAGE_OFFSET;
+ initrd_end = initrd_start+INITRD_SIZE;
+ initrd_below_start_ok = 1;
+ }
+ else {
+ printk(KERN_ERR "initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+ INITRD_START + INITRD_SIZE,
+ max_low_pfn << PAGE_SHIFT);
+ initrd_start = 0;
+ }
+ }
+#endif
+
+ phys_to_machine_mapping = (unsigned long *)xen_start_info.mfn_list;
+
+ return max_low_pfn;
+}
+#else
+extern unsigned long setup_memory(void);
+#endif /* !CONFIG_DISCONTIGMEM */
+
+/*
+ * Request address space for all standard RAM and ROM resources
+ * and also for regions reported as reserved by the e820.
+ */
+static void __init
+legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
+{
+ int i;
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ probe_roms();
+#endif
+ for (i = 0; i < e820.nr_map; i++) {
+ struct resource *res;
+ if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
+ continue;
+ res = alloc_bootmem_low(sizeof(struct resource));
+ switch (e820.map[i].type) {
+ case E820_RAM: res->name = "System RAM"; break;
+ case E820_ACPI: res->name = "ACPI Tables"; break;
+ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
+ default: res->name = "reserved";
+ }
+ res->start = e820.map[i].addr;
+ res->end = res->start + e820.map[i].size - 1;
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ request_resource(&iomem_resource, res);
+ if (e820.map[i].type == E820_RAM) {
+ /*
+ * We don't know which RAM region contains kernel data,
+ * so we try it repeatedly and let the resource manager
+ * test it.
+ */
+ request_resource(res, code_resource);
+ request_resource(res, data_resource);
+ }
+ }
+}
+
+/*
+ * Request address space for all standard resources
+ */
+static void __init register_memory(unsigned long max_low_pfn)
+{
+ unsigned long low_mem_size;
+ int i;
+
+ if (efi_enabled)
+ efi_initialize_iomem_resources(&code_resource, &data_resource);
+ else
+ legacy_init_iomem_resources(&code_resource, &data_resource);
+
+ /* EFI systems may still have VGA */
+ request_resource(&iomem_resource, &video_ram_resource);
+
+ /* request I/O space for devices used on all i[345]86 PCs */
+ for (i = 0; i < STANDARD_IO_RESOURCES; i++)
+ request_resource(&ioport_resource, &standard_io_resources[i]);
+
+ /* Tell the PCI layer not to allocate too close to the RAM area.. */
+ low_mem_size = ((max_low_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
+ if (low_mem_size > pci_mem_start)
+ pci_mem_start = low_mem_size;
+}
+
+/* Use inline assembly to define this because the nops are defined
+ as inline assembly strings in the include files and we cannot
+ get them easily into strings. */
+asm("\t.data\nintelnops: "
+ GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
+ GENERIC_NOP7 GENERIC_NOP8);
+asm("\t.data\nk8nops: "
+ K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
+ K8_NOP7 K8_NOP8);
+asm("\t.data\nk7nops: "
+ K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
+ K7_NOP7 K7_NOP8);
+
+extern unsigned char intelnops[], k8nops[], k7nops[];
+static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
+ NULL,
+ intelnops,
+ intelnops + 1,
+ intelnops + 1 + 2,
+ intelnops + 1 + 2 + 3,
+ intelnops + 1 + 2 + 3 + 4,
+ intelnops + 1 + 2 + 3 + 4 + 5,
+ intelnops + 1 + 2 + 3 + 4 + 5 + 6,
+ intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+};
+static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
+ NULL,
+ k8nops,
+ k8nops + 1,
+ k8nops + 1 + 2,
+ k8nops + 1 + 2 + 3,
+ k8nops + 1 + 2 + 3 + 4,
+ k8nops + 1 + 2 + 3 + 4 + 5,
+ k8nops + 1 + 2 + 3 + 4 + 5 + 6,
+ k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+};
+static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
+ NULL,
+ k7nops,
+ k7nops + 1,
+ k7nops + 1 + 2,
+ k7nops + 1 + 2 + 3,
+ k7nops + 1 + 2 + 3 + 4,
+ k7nops + 1 + 2 + 3 + 4 + 5,
+ k7nops + 1 + 2 + 3 + 4 + 5 + 6,
+ k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+};
+static struct nop {
+ int cpuid;
+ unsigned char **noptable;
+} noptypes[] = {
+ { X86_FEATURE_K8, k8_nops },
+ { X86_FEATURE_K7, k7_nops },
+ { -1, NULL }
+};
+
+/* Replace instructions with better alternatives for this CPU type.
+
+ This runs before SMP is initialized to avoid SMP problems with
+ self-modifying code. This implies that asymmetric systems where
+ APs have fewer capabilities than the boot processor are not
+ handled. In such cases, boot with "noreplacement". */
+void apply_alternatives(void *start, void *end)
+{
+ struct alt_instr *a;
+ int diff, i, k;
+ unsigned char **noptable = intel_nops;
+ for (i = 0; noptypes[i].cpuid >= 0; i++) {
+ if (boot_cpu_has(noptypes[i].cpuid)) {
+ noptable = noptypes[i].noptable;
+ break;
+ }
+ }
+ for (a = start; (void *)a < end; a++) {
+ if (!boot_cpu_has(a->cpuid))
+ continue;
+ BUG_ON(a->replacementlen > a->instrlen);
+ memcpy(a->instr, a->replacement, a->replacementlen);
+ diff = a->instrlen - a->replacementlen;
+ /* Pad the rest with nops */
+ for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
+ k = diff;
+ if (k > ASM_NOP_MAX)
+ k = ASM_NOP_MAX;
+ memcpy(a->instr + i, noptable[k], k);
+ }
+ }
+}
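+
+/*
+ * Worked example: for an alt_instr entry with instrlen == 5 and
+ * replacementlen == 3, the three replacement bytes are copied over the
+ * original instruction and the remaining two bytes are padded with
+ * noptable[2], a single two-byte NOP from the tables above. Gaps wider
+ * than ASM_NOP_MAX are filled with several NOPs.
+ */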
+
+static int no_replacement __initdata = 0;
+
+void __init alternative_instructions(void)
+{
+ extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+ if (no_replacement)
+ return;
+ apply_alternatives(__alt_instructions, __alt_instructions_end);
+}
+
+static int __init noreplacement_setup(char *s)
+{
+ no_replacement = 1;
+ return 0;
+}
+
+__setup("noreplacement", noreplacement_setup);
+
+static char * __init machine_specific_memory_setup(void);
+
+/*
+ * Determine if we were loaded by an EFI loader. If so, then we have also been
+ * passed the efi memmap, systab, etc., so we should use these data structures
+ * for initialization. Note, the efi init code path is determined by the
+ * global efi_enabled. This allows the same kernel image to be used on existing
+ * systems (with a traditional BIOS) as well as on EFI systems.
+ */
+void __init setup_arch(char **cmdline_p)
+{
+ int i,j;
+
+ unsigned long max_low_pfn;
+
+ /* Force a quick death if the kernel panics. */
+ extern int panic_timeout;
+ if ( panic_timeout == 0 )
+ panic_timeout = 1;
+
+ HYPERVISOR_vm_assist(VMASST_CMD_enable,
+ VMASST_TYPE_4gb_segments);
+
+ memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
+ early_cpu_init();
+
+ /*
+ * FIXME: This isn't an official loader_type right
+ * now but does currently work with elilo.
+ * If we were configured as an EFI kernel, check to make
+ * sure that we were loaded correctly from elilo and that
+ * the system table is valid. If not, then initialize normally.
+ */
+#ifdef CONFIG_EFI
+ if ((LOADER_TYPE == 0x50) && EFI_SYSTAB)
+ efi_enabled = 1;
+#endif
+
+ ROOT_DEV = MKDEV(RAMDISK_MAJOR,0); /*old_decode_dev(ORIG_ROOT_DEV);*/
+ drive_info = DRIVE_INFO;
+ screen_info = SCREEN_INFO;
+ edid_info = EDID_INFO;
+ apm_info.bios = APM_BIOS_INFO;
+ ist_info = IST_INFO;
+ saved_videomode = VIDEO_MODE;
+ if( SYS_DESC_TABLE.length != 0 ) {
+ MCA_bus = SYS_DESC_TABLE.table[3] &0x2;
+ machine_id = SYS_DESC_TABLE.table[0];
+ machine_submodel_id = SYS_DESC_TABLE.table[1];
+ BIOS_revision = SYS_DESC_TABLE.table[2];
+ }
+ aux_device_present = AUX_DEVICE_INFO;
+
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+ /* This is drawn from a dump from vgacon:startup in standard Linux. */
+ screen_info.orig_video_mode = 3;
+ screen_info.orig_video_isVGA = 1;
+ screen_info.orig_video_lines = 25;
+ screen_info.orig_video_cols = 80;
+ screen_info.orig_video_ega_bx = 3;
+ screen_info.orig_video_points = 16;
+#endif
+
+#ifdef CONFIG_BLK_DEV_RAM
+ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
+ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
+ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
+#endif
+ ARCH_SETUP
+ if (efi_enabled)
+ efi_init();
+ else {
+ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
+ print_memory_map(machine_specific_memory_setup());
+ }
+
+ copy_edd();
+
+ if (!MOUNT_ROOT_RDONLY)
+ root_mountflags &= ~MS_RDONLY;
+ init_mm.start_code = (unsigned long) _text;
+ init_mm.end_code = (unsigned long) _etext;
+ init_mm.end_data = (unsigned long) _edata;
+ init_mm.brk = (PFN_UP(__pa(xen_start_info.pt_base)) + xen_start_info.nr_pt_frames) << PAGE_SHIFT;
+
+ /* XEN: This is nonsense: kernel may not even be contiguous in RAM. */
+ /*code_resource.start = virt_to_phys(_text);*/
+ /*code_resource.end = virt_to_phys(_etext)-1;*/
+ /*data_resource.start = virt_to_phys(_etext);*/
+ /*data_resource.end = virt_to_phys(_edata)-1;*/
+
+ parse_cmdline_early(cmdline_p);
+
+ max_low_pfn = setup_memory();
+
+ /*
+ * NOTE: before this point _nobody_ is allowed to allocate
+ * any memory using the bootmem allocator. Although the
+ * allocator is now initialised, only the first 8MB of the kernel
+ * virtual address space has been mapped. All allocations before
+ * paging_init() has completed must use the alloc_bootmem_low_pages()
+ * variant (which allocates DMA'able memory) and care must be taken
+ * not to exceed the 8MB limit.
+ */
+
+#ifdef CONFIG_SMP
+ smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
+#endif
+ paging_init();
+
++ /* Make sure we have a large enough P->M table. */
++ if (max_pfn > xen_start_info.nr_pages) {
++ phys_to_machine_mapping = alloc_bootmem_low_pages(
++ max_pfn * sizeof(unsigned long));
++ memset(phys_to_machine_mapping, ~0,
++ max_pfn * sizeof(unsigned long));
++ memcpy(phys_to_machine_mapping,
++ (unsigned long *)xen_start_info.mfn_list,
++ xen_start_info.nr_pages * sizeof(unsigned long));
++ free_bootmem(
++ __pa(xen_start_info.mfn_list),
++ PFN_PHYS(PFN_UP(xen_start_info.nr_pages *
++ sizeof(unsigned long))));
++ }
++
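+ /*
+ * Record, one entry per page of the P->M table, the machine frame
+ * backing that page; the hypervisor picks the list up via the
+ * shared_info page below.
+ */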
+ pfn_to_mfn_frame_list = alloc_bootmem_low_pages(PAGE_SIZE);
+ for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
+ {
+ pfn_to_mfn_frame_list[j] =
+ virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
+ }
+ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list =
+ virt_to_machine(pfn_to_mfn_frame_list) >> PAGE_SHIFT;
+
+ /*
+ * NOTE: at this point the bootmem allocator is fully available.
+ */
+
+#ifdef CONFIG_EARLY_PRINTK
+ {
+ char *s = strstr(*cmdline_p, "earlyprintk=");
+ if (s) {
+ extern void setup_early_printk(char *);
+
+ setup_early_printk(s);
+ printk("early console enabled\n");
+ }
+ }
+#endif
+
+ dmi_scan_machine();
+
+#ifdef CONFIG_X86_GENERICARCH
+ generic_apic_probe(*cmdline_p);
+#endif
+ if (efi_enabled)
+ efi_map_memmap();
+
+ /*
+ * Parse the ACPI tables for possible boot-time SMP configuration.
+ */
+ acpi_boot_init();
+
+#ifdef CONFIG_X86_LOCAL_APIC
+ if (smp_found_config)
+ get_smp_config();
+#endif
+
+ register_memory(max_low_pfn);
+
+ /* If we are a privileged guest OS then we should request IO privs. */
+ if (xen_start_info.flags & SIF_PRIVILEGED) {
+ dom0_op_t op;
+ op.cmd = DOM0_IOPL;
+ op.u.iopl.domain = DOMID_SELF;
+ op.u.iopl.iopl = 1;
+ if (HYPERVISOR_dom0_op(&op) != 0)
+ panic("Unable to obtain IOPL, despite SIF_PRIVILEGED");
+ current->thread.io_pl = 1;
+ }
+
+ if (xen_start_info.flags & SIF_INITDOMAIN) {
+ if (!(xen_start_info.flags & SIF_PRIVILEGED))
+ panic("Xen granted us console access "
+ "but not privileged status");
+
+#ifdef CONFIG_VT
+#if defined(CONFIG_VGA_CONSOLE)
+ if (!efi_enabled ||
+ (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
+ conswitchp = &vga_con;
+#elif defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+#endif
+#endif
+ } else {
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ extern const struct consw xennull_con;
+ extern int console_use_vt;
+#if defined(CONFIG_VGA_CONSOLE)
+ /* disable VGA driver */
+ ORIG_VIDEO_ISVGA = VIDEO_TYPE_VLFB;
+#endif
+ conswitchp = &xennull_con;
+ console_use_vt = 0;
+#endif
+ }
+}
+
+#include "setup_arch_post.h"
+/*
+ * Local Variables:
+ * mode:c
+ * c-file-style:"k&r"
+ * c-basic-offset:8
+ * End:
+ */
--- /dev/null
- int ret;
+/******************************************************************************
+ * mm/hypervisor.c
+ *
+ * Update page tables via the hypervisor.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/multicall.h>
++#include <asm-xen/balloon.h>
+
+/*
+ * This suffices to protect us if we ever move to SMP domains.
+ * Further, it protects us against interrupts. At the very least, this is
+ * required for the network driver which flushes the update queue before
+ * pushing new receive buffers.
+ */
+static spinlock_t update_lock = SPIN_LOCK_UNLOCKED;
+
+/* Linux 2.6 isn't using the traditional batched interface. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define QUEUE_SIZE 2048
+#define pte_offset_kernel pte_offset
+#else
+#define QUEUE_SIZE 128
+#endif
+
+static mmu_update_t update_queue[QUEUE_SIZE];
+unsigned int mmu_update_queue_idx = 0;
+#define idx mmu_update_queue_idx
+
+/*
+ * MULTICALL_flush_page_update_queue:
+ * This is a version of the flush which queues as part of a multicall.
+ */
+void MULTICALL_flush_page_update_queue(void)
+{
+ unsigned long flags;
+ unsigned int _idx;
+ spin_lock_irqsave(&update_lock, flags);
+ if ( (_idx = idx) != 0 )
+ {
+ idx = 0;
+ wmb(); /* Make sure index is cleared first to avoid double updates. */
+ queue_multicall3(__HYPERVISOR_mmu_update,
+ (unsigned long)update_queue,
+ (unsigned long)_idx,
+ (unsigned long)NULL);
+ }
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+static inline void __flush_page_update_queue(void)
+{
+ unsigned int _idx = idx;
+ idx = 0;
+ wmb(); /* Make sure index is cleared first to avoid double updates. */
+ if ( unlikely(HYPERVISOR_mmu_update(update_queue, _idx, NULL) < 0) )
+ {
+ printk(KERN_ALERT "Failed to execute MMU updates.\n");
+ BUG();
+ }
+}
+
+void _flush_page_update_queue(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ if ( idx != 0 ) __flush_page_update_queue();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+static inline void increment_index(void)
+{
+ idx++;
+ if ( unlikely(idx == QUEUE_SIZE) ) __flush_page_update_queue();
+}
+
+static inline void increment_index_and_flush(void)
+{
+ idx++;
+ __flush_page_update_queue();
+}
+
+void queue_l1_entry_update(pte_t *ptr, unsigned long val)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = virt_to_machine(ptr);
+ update_queue[idx].val = val;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_l2_entry_update(pmd_t *ptr, unsigned long val)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = virt_to_machine(ptr);
+ update_queue[idx].val = val;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pt_switch(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_NEW_BASEPTR;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_tlb_flush(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_TLB_FLUSH;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_invlpg(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
+ update_queue[idx].ptr |= ptr & PAGE_MASK;
+ update_queue[idx].val = MMUEXT_INVLPG;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pgd_pin(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_PIN_L2_TABLE;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pgd_unpin(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_UNPIN_TABLE;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pte_pin(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_PIN_L1_TABLE;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_pte_unpin(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_UNPIN_TABLE;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_set_ldt(unsigned long ptr, unsigned long len)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = MMU_EXTENDED_COMMAND | ptr;
+ update_queue[idx].val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void queue_machphys_update(unsigned long mfn, unsigned long pfn)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+ update_queue[idx].val = pfn;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
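+
+/*
+ * Typical batched usage (illustrative sketch only): queue several
+ * updates and issue one hypercall for the whole batch, e.g.
+ *
+ * queue_l1_entry_update(ptep, new_val);
+ * queue_machphys_update(mfn, pfn);
+ * flush_page_update_queue();
+ *
+ * The queue also flushes itself whenever it fills to QUEUE_SIZE
+ * entries, so callers pay at most one hypercall per batch.
+ */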
+
+/* Queue-and-flush versions of the above: queue one update, then flush immediately. */
+void xen_l1_entry_update(pte_t *ptr, unsigned long val)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = virt_to_machine(ptr);
+ update_queue[idx].val = val;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_l2_entry_update(pmd_t *ptr, unsigned long val)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = virt_to_machine(ptr);
+ update_queue[idx].val = val;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pt_switch(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_NEW_BASEPTR;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_tlb_flush(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_TLB_FLUSH;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_invlpg(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
+ update_queue[idx].ptr |= ptr & PAGE_MASK;
+ update_queue[idx].val = MMUEXT_INVLPG;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pgd_pin(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_PIN_L2_TABLE;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pgd_unpin(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_UNPIN_TABLE;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pte_pin(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_PIN_L1_TABLE;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_pte_unpin(unsigned long ptr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = phys_to_machine(ptr);
+ update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
+ update_queue[idx].val = MMUEXT_UNPIN_TABLE;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_set_ldt(unsigned long ptr, unsigned long len)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = MMU_EXTENDED_COMMAND | ptr;
+ update_queue[idx].val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+void xen_machphys_update(unsigned long mfn, unsigned long pfn)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ update_queue[idx].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+ update_queue[idx].val = pfn;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+}
+
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+
+unsigned long allocate_empty_lowmem_region(unsigned long pages)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long *pfn_array;
+ unsigned long vstart;
+ unsigned long i;
- ret = HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
- pfn_array, 1<<order, 0);
- if ( unlikely(ret != (1<<order)) )
- {
- printk(KERN_WARNING "Unable to reduce memory reservation (%d)\n", ret);
- BUG();
- }
+ unsigned int order = get_order(pages*PAGE_SIZE);
+
+ vstart = __get_free_pages(GFP_KERNEL, order);
+ if ( vstart == 0 )
+ return 0UL;
+
+ scrub_pages(vstart, 1 << order);
+
+ pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
+ if ( pfn_array == NULL )
+ BUG();
+
+ for ( i = 0; i < (1<<order); i++ )
+ {
+ pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
+ pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
+ pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+ pfn_array[i] = pte->pte_low >> PAGE_SHIFT;
+ queue_l1_entry_update(pte, 0);
+ phys_to_machine_mapping[__pa(vstart)>>PAGE_SHIFT] = INVALID_P2M_ENTRY;
+ }
+
+ /* Flush updates through and flush the TLB. */
+ xen_tlb_flush();
+
- void deallocate_lowmem_region(unsigned long vstart, unsigned long pages)
- {
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *pte;
- unsigned long *pfn_array;
- unsigned long i;
- int ret;
- unsigned int order = get_order(pages*PAGE_SIZE);
-
- pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
- if ( pfn_array == NULL )
- BUG();
-
- ret = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
- pfn_array, 1<<order, 0);
- if ( unlikely(ret != (1<<order)) )
- {
- printk(KERN_WARNING "Unable to increase memory reservation (%d)\n",
- ret);
- BUG();
- }
-
- for ( i = 0; i < (1<<order); i++ )
- {
- pgd = pgd_offset_k( (vstart + (i*PAGE_SIZE)));
- pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
- pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
- queue_l1_entry_update(pte, (pfn_array[i]<<PAGE_SHIFT)|__PAGE_KERNEL);
- queue_machphys_update(pfn_array[i], __pa(vstart)>>PAGE_SHIFT);
- phys_to_machine_mapping[__pa(vstart)>>PAGE_SHIFT] = pfn_array[i];
- }
-
- flush_page_update_queue();
-
- vfree(pfn_array);
-
- free_pages(vstart, order);
- }
-
++ balloon_put_pages(pfn_array, 1 << order);
+
+ vfree(pfn_array);
+
+ return vstart;
+}
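+
+/*
+ * Example caller (hypothetical backend driver): reserve a lowmem hole
+ * that a foreign domain's frames can later be mapped into.
+ *
+ * unsigned long vstart = allocate_empty_lowmem_region(16);
+ * if (vstart == 0)
+ * return -ENOMEM;
+ *
+ * On success the region's PTEs are cleared, its p2m entries are set to
+ * INVALID_P2M_ENTRY, and the backing frames have been handed to the
+ * balloon driver via balloon_put_pages().
+ */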
+
+#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
--- /dev/null
- void __init protect_page(pgd_t *pgd, void *page, int mode)
- {
- pmd_t *pmd;
- pte_t *pte;
- unsigned long addr;
-
- addr = (unsigned long)page;
- pgd += pgd_index(addr);
- pmd = pmd_offset(pgd, addr);
- pte = pte_offset_kernel(pmd, addr);
- if (!pte_present(*pte))
- return;
- queue_l1_entry_update(pte, mode ? pte_val_ma(*pte) & ~_PAGE_RW :
- pte_val_ma(*pte) | _PAGE_RW);
- }
-
- void __init protect_pagetable(pgd_t *dpgd, pgd_t *spgd, int mode)
- {
- pmd_t *pmd;
- pte_t *pte;
- int pgd_idx, pmd_idx;
-
- protect_page(dpgd, spgd, mode);
-
- for (pgd_idx = 0; pgd_idx < PTRS_PER_PGD_NO_HV; spgd++, pgd_idx++) {
- pmd = pmd_offset(spgd, 0);
- if (pmd_none(*pmd))
- continue;
- for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
- pte = pte_offset_kernel(pmd, 0);
- protect_page(dpgd, pte, mode);
- }
- }
- }
-
+/*
+ * linux/arch/i386/mm/init.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/efi.h>
+
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/dma.h>
+#include <asm/fixmap.h>
+#include <asm/e820.h>
+#include <asm/apic.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/sections.h>
+#include <asm-xen/hypervisor.h>
+
+unsigned int __VMALLOC_RESERVE = 128 << 20;
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+unsigned long highstart_pfn, highend_pfn;
+
+static int noinline do_test_wp_bit(void);
+
+/*
+ * Creates a middle page table and puts a pointer to it in the
+ * given global directory entry. This only returns the gd entry
+ * in non-PAE compilation mode, since the middle layer is folded.
+ */
+static pmd_t * __init one_md_table_init(pgd_t *pgd)
+{
+ pmd_t *pmd_table;
+
+#ifdef CONFIG_X86_PAE
+ pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+ if (pmd_table != pmd_offset(pgd, 0))
+ BUG();
+#else
+ pmd_table = pmd_offset(pgd, 0);
+#endif
+
+ return pmd_table;
+}
+
+/*
+ * Create a page table and place a pointer to it in a middle page
+ * directory entry.
+ */
+static pte_t * __init one_page_table_init(pmd_t *pmd)
+{
+ if (pmd_none(*pmd)) {
+ pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
++ make_page_readonly(page_table);
+ set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
+ if (page_table != pte_offset_kernel(pmd, 0))
+ BUG();
+
+ return page_table;
+ }
+
+ return pte_offset_kernel(pmd, 0);
+}
+
+/*
+ * This function initializes a certain range of kernel virtual memory
+ * with new bootmem page tables, everywhere page tables are missing in
+ * the given range.
+ */
+
+/*
+ * NOTE: The pagetables are allocated contiguously in physical space,
+ * so we can cache the place of the first one and move around without
+ * checking the pgd every time.
+ */
+static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ int pgd_idx, pmd_idx;
+ unsigned long vaddr;
+
+ vaddr = start;
+ pgd_idx = pgd_index(vaddr);
+ pmd_idx = pmd_index(vaddr);
+ pgd = pgd_base + pgd_idx;
+
+ for ( ; (pgd_idx < PTRS_PER_PGD_NO_HV) && (vaddr != end); pgd++, pgd_idx++) {
+ if (pgd_none(*pgd))
+ one_md_table_init(pgd);
+
+ pmd = pmd_offset(pgd, vaddr);
+ for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
+ if (pmd_none(*pmd))
+ one_page_table_init(pmd);
+
+ vaddr += PMD_SIZE;
+ }
+ pmd_idx = 0;
+ }
+}
+
- for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
+static inline int is_kernel_text(unsigned long addr)
+{
+ if (addr >= (unsigned long)_stext && addr <= (unsigned long)__init_end)
+ return 1;
+ return 0;
+}
+
+/*
+ * This maps the physical memory to kernel virtual address space, a total
+ * of max_low_pfn pages, by creating page tables starting from address
+ * PAGE_OFFSET.
+ */
+static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
+{
+ unsigned long pfn;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ int pgd_idx, pmd_idx, pte_ofs;
+
++ unsigned long max_ram_pfn = xen_start_info.nr_pages;
++ if (max_ram_pfn > max_low_pfn)
++ max_ram_pfn = max_low_pfn;
++
+ pgd_idx = pgd_index(PAGE_OFFSET);
+ pgd = pgd_base + pgd_idx;
+ pfn = 0;
+ pmd_idx = pmd_index(PAGE_OFFSET);
+ pte_ofs = pte_index(PAGE_OFFSET);
+
+ for (; pgd_idx < PTRS_PER_PGD_NO_HV; pgd++, pgd_idx++) {
+ pmd = one_md_table_init(pgd);
+ if (pfn >= max_low_pfn)
+ continue;
+ pmd += pmd_idx;
+ for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
+ unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
+
+ /* Map with big pages if possible, otherwise create normal page tables. */
+ if (cpu_has_pse) {
+ unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
+
+ if (is_kernel_text(address) || is_kernel_text(address2))
+ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
+ else
+ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
+ pfn += PTRS_PER_PTE;
+ } else {
+ pte = one_page_table_init(pmd);
+
+ pte += pte_ofs;
- __free_page(page);
++ /* XEN: Only map initial RAM allocation. */
++ for (; pte_ofs < PTRS_PER_PTE && pfn < max_ram_pfn; pte++, pfn++, pte_ofs++) {
++ if (pte_present(*pte))
++ continue;
+ if (is_kernel_text(address))
+ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+ else
+ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
+ }
+ pte_ofs = 0;
+ }
+ flush_page_update_queue();
+ }
+ pmd_idx = 0;
+ }
+}
+
+static inline int page_kills_ppro(unsigned long pagenr)
+{
+ if (pagenr >= 0x70000 && pagenr <= 0x7003F)
+ return 1;
+ return 0;
+}
+
+extern int is_available_memory(efi_memory_desc_t *);
+
+static inline int page_is_ram(unsigned long pagenr)
+{
+ int i;
+ unsigned long addr, end;
+
+ if (efi_enabled) {
+ efi_memory_desc_t *md;
+
+ for (i = 0; i < memmap.nr_map; i++) {
+ md = &memmap.map[i];
+ if (!is_available_memory(md))
+ continue;
+ addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
+ end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
+
+ if ((pagenr >= addr) && (pagenr < end))
+ return 1;
+ }
+ return 0;
+ }
+
+ for (i = 0; i < e820.nr_map; i++) {
+
+ if (e820.map[i].type != E820_RAM) /* not usable memory */
+ continue;
+ /*
+ * !!!FIXME!!! Some BIOSen report areas as RAM that
+ * are not. Notably the 640KB->1MB area. We need a sanity
+ * check here.
+ */
+ addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
+ end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
+ if ((pagenr >= addr) && (pagenr < end))
+ return 1;
+ }
+ return 0;
+}
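+
+/*
+ * Worked example: an E820_RAM entry with addr == 0x100000 and
+ * size == 0x5000 spans pfns 0x100 to 0x104 inclusive, so page_is_ram()
+ * returns 1 for exactly those five page numbers.
+ */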
+
+#ifdef CONFIG_HIGHMEM
+pte_t *kmap_pte;
+pgprot_t kmap_prot;
+
+EXPORT_SYMBOL(kmap_prot);
+EXPORT_SYMBOL(kmap_pte);
+
+#define kmap_get_fixmap_pte(vaddr) \
+ pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))
+
+void __init kmap_init(void)
+{
+ unsigned long kmap_vstart;
+
+ /* cache the first kmap pte */
+ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+ kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+
+ kmap_prot = PAGE_KERNEL;
+}
+
+void __init permanent_kmaps_init(pgd_t *pgd_base)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long vaddr;
+
+ vaddr = PKMAP_BASE;
+ page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+
+ pgd = swapper_pg_dir + pgd_index(vaddr);
+ pmd = pmd_offset(pgd, vaddr);
+ pte = pte_offset_kernel(pmd, vaddr);
+ pkmap_page_table = pte;
+}
+
+void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
+{
+ if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
+ ClearPageReserved(page);
+ set_bit(PG_highmem, &page->flags);
+ set_page_count(page, 1);
- pgd_t *pgd_base = swapper_pg_dir;
++ if (pfn < xen_start_info.nr_pages)
++ __free_page(page);
+ totalhigh_pages++;
+ } else
+ SetPageReserved(page);
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+void __init set_highmem_pages_init(int bad_ppro)
+{
+ int pfn;
+ for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
+ one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
+ totalram_pages += totalhigh_pages;
+}
+#else
+extern void set_highmem_pages_init(int);
+#endif /* !CONFIG_DISCONTIGMEM */
+
+#else
+#define kmap_init() do { } while (0)
+#define permanent_kmaps_init(pgd_base) do { } while (0)
+#define set_highmem_pages_init(bad_ppro) do { } while (0)
+#endif /* CONFIG_HIGHMEM */
+
+unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
+unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
+
+#ifndef CONFIG_DISCONTIGMEM
+#define remap_numa_kva() do {} while (0)
+#else
+extern void __init remap_numa_kva(void);
+#endif
+
+static void __init pagetable_init (void)
+{
+ unsigned long vaddr;
- kernel_physical_mapping_init(pgd_base);
++ pgd_t *old_pgd = (pgd_t *)xen_start_info.pt_base;
++ pgd_t *new_pgd = swapper_pg_dir;
+
+#ifdef CONFIG_X86_PAE
+ int i;
+ /* Init entries of the first-level page table to the zero page */
+ for (i = 0; i < PTRS_PER_PGD; i++)
+ set_pgd(pgd_base + i, __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
+#endif
+
+ /* Enable PSE if available */
+ if (cpu_has_pse) {
+ set_in_cr4(X86_CR4_PSE);
+ }
+
+ /* Enable PGE if available */
+ if (cpu_has_pge) {
+ set_in_cr4(X86_CR4_PGE);
+ __PAGE_KERNEL |= _PAGE_GLOBAL;
+ __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
+ }
+
- page_table_range_init(vaddr, 0, pgd_base);
++ /*
++ * Switch to proper mm_init page directory. Initialise from the current
++ * page directory, write-protect the new page directory, then switch to
++ * it. We clean up by write-enabling and then freeing the old page dir.
++ */
++ memcpy(new_pgd, old_pgd, PTRS_PER_PGD_NO_HV*sizeof(pgd_t));
++ make_page_readonly(new_pgd);
++ queue_pgd_pin(__pa(new_pgd));
++ load_cr3(new_pgd);
++ queue_pgd_unpin(__pa(old_pgd));
++ __flush_tlb_all(); /* implicit flush */
++ make_page_writable(old_pgd);
++ flush_page_update_queue();
++ free_bootmem(__pa(old_pgd), PAGE_SIZE);
++
++ kernel_physical_mapping_init(new_pgd);
+ remap_numa_kva();
+
+ /*
+ * Fixed mappings, only the page table structure has to be
+ * created - mappings will be set by set_fixmap():
+ */
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
- permanent_kmaps_init(pgd_base);
++ page_table_range_init(vaddr, 0, new_pgd);
+
- pgd_base[0] = pgd_base[USER_PTRS_PER_PGD];
++ permanent_kmaps_init(new_pgd);
+
+#ifdef CONFIG_X86_PAE
+ /*
+ * Add low memory identity-mappings - SMP needs it when
+ * starting up on an AP from real-mode. In the non-PAE
+ * case we already have these mappings through head.S.
+ * All user-space mappings are explicitly cleared after
+ * SMP startup.
+ */
- pgd_t *old_pgd = (pgd_t *)xen_start_info.pt_base;
- pgd_t *new_pgd = swapper_pg_dir;
++ new_pgd[0] = new_pgd[USER_PTRS_PER_PGD];
+#endif
+}
+
+#if defined(CONFIG_PM_DISK) || defined(CONFIG_SOFTWARE_SUSPEND)
+/*
+ * Swap suspend & friends need this for resume because things like the intel-agp
+ * driver might have split up a kernel 4MB mapping.
+ */
+char __nosavedata swsusp_pg_dir[PAGE_SIZE]
+ __attribute__ ((aligned (PAGE_SIZE)));
+
+static inline void save_pg_dir(void)
+{
+ memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
+}
+#else
+static inline void save_pg_dir(void)
+{
+}
+#endif
+
+void zap_low_mappings (void)
+{
+ int i;
+
+ save_pg_dir();
+
+ /*
+ * Zap initial low-memory mappings.
+ *
+ * Note that "pgd_clear()" doesn't do it for
+ * us, because pgd_clear() is a no-op on i386.
+ */
+ for (i = 0; i < USER_PTRS_PER_PGD; i++)
+#ifdef CONFIG_X86_PAE
+ set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
+#else
+ set_pgd(swapper_pg_dir+i, __pgd(0));
+#endif
+ flush_tlb_all();
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+void __init zone_sizes_init(void)
+{
+ unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
+ unsigned int /*max_dma,*/ high, low;
+
+ /*
+ * XEN: Our notion of "DMA memory" is fake when running over Xen.
+ * We simply put all RAM in the DMA zone so that those drivers which
+ * needlessly specify GFP_DMA do not get starved of RAM unnecessarily.
+ * Those drivers that *do* require lowmem are screwed anyway when
+ * running over Xen!
+ */
+ /*max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;*/
+ low = max_low_pfn;
+ high = highend_pfn;
+
+ /*if (low < max_dma)*/
+ zones_size[ZONE_DMA] = low;
+ /*else*/ {
+ /*zones_size[ZONE_DMA] = max_dma;*/
+ /*zones_size[ZONE_NORMAL] = low - max_dma;*/
+#ifdef CONFIG_HIGHMEM
+ zones_size[ZONE_HIGHMEM] = high - low;
+#endif
+ }
+ free_area_init(zones_size);
+}
+#else
+extern void zone_sizes_init(void);
+#endif /* !CONFIG_DISCONTIGMEM */
+
+static int disable_nx __initdata = 0;
+u64 __supported_pte_mask = ~_PAGE_NX;
+
+/*
+ * noexec = on|off
+ *
+ * Control non executable mappings.
+ *
+ * on Enable
+ * off Disable
+ */
+static int __init noexec_setup(char *str)
+{
+ if (!strncmp(str, "on",2) && cpu_has_nx) {
+ __supported_pte_mask |= _PAGE_NX;
+ disable_nx = 0;
+ } else if (!strncmp(str,"off",3)) {
+ disable_nx = 1;
+ __supported_pte_mask &= ~_PAGE_NX;
+ }
+ return 1;
+}
+
+__setup("noexec=", noexec_setup);
+
+int nx_enabled = 0;
+#ifdef CONFIG_X86_PAE
+
+static void __init set_nx(void)
+{
+ unsigned int v[4], l, h;
+
+ if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
+ cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
+ if ((v[3] & (1 << 20)) && !disable_nx) {
+ rdmsr(MSR_EFER, l, h);
+ l |= EFER_NX;
+ wrmsr(MSR_EFER, l, h);
+ nx_enabled = 1;
+ __supported_pte_mask |= _PAGE_NX;
+ }
+ }
+}
+
+/*
+ * Enables/disables executability of a given kernel page and
+ * returns the previous setting.
+ */
+int __init set_kernel_exec(unsigned long vaddr, int enable)
+{
+ pte_t *pte;
+ int ret = 1;
+
+ if (!nx_enabled)
+ goto out;
+
+ pte = lookup_address(vaddr);
+ BUG_ON(!pte);
+
+ if (!pte_exec_kernel(*pte))
+ ret = 0;
+
+ if (enable)
+ pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
+ else
+ pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
+ __flush_tlb_all();
+out:
+ return ret;
+}
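+
+/*
+ * Example (illustrative): temporarily make a kernel page executable and
+ * restore the previous setting afterwards.
+ *
+ * int was_exec = set_kernel_exec(vaddr, 1);
+ * ... execute from the page ...
+ * set_kernel_exec(vaddr, was_exec);
+ */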
+
+#endif
+
+/*
+ * paging_init() sets up the page tables - note that the first 8MB are
+ * already mapped by head.S.
+ *
+ * This routine also unmaps the page at virtual kernel address 0, so
+ * that we can trap those pesky NULL-reference errors in the kernel.
+ */
+void __init paging_init(void)
+{
- /*
- * Write-protect both page tables within both page tables.
- * That's three ops, as the old p.t. is already protected
- * within the old p.t. Then pin the new table, switch tables,
- * and unprotect the old table.
- */
- protect_pagetable(new_pgd, old_pgd, PROT_ON);
- protect_pagetable(new_pgd, new_pgd, PROT_ON);
- protect_pagetable(old_pgd, new_pgd, PROT_ON);
- queue_pgd_pin(__pa(new_pgd));
- load_cr3(new_pgd);
- queue_pgd_unpin(__pa(old_pgd));
- __flush_tlb_all(); /* implicit flush */
- protect_pagetable(new_pgd, old_pgd, PROT_OFF);
- flush_page_update_queue();
-
- /* Completely detached from old tables, so free them. */
- free_bootmem(__pa(old_pgd), xen_start_info.nr_pt_frames << PAGE_SHIFT);
-
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+ int i;
+#endif
+
+#ifdef CONFIG_X86_PAE
+ set_nx();
+ if (nx_enabled)
+ printk("NX (Execute Disable) protection: active\n");
+#endif
+
+ pagetable_init();
+
+#ifdef CONFIG_X86_PAE
+ /*
+ * We will bail out later - printk doesn't work right now so
+ * the user would just see a hanging kernel.
+ */
+ if (cpu_has_pae)
+ set_in_cr4(X86_CR4_PAE);
+#endif
+ __flush_tlb_all();
+
+ kmap_init();
+ zone_sizes_init();
+
+ /* Switch to the real shared_info page, and clear the dummy page. */
+ flush_page_update_queue();
+ set_fixmap_ma(FIX_SHARED_INFO, xen_start_info.shared_info);
+ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
+ memset(empty_zero_page, 0, sizeof(empty_zero_page));
+
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+ /* Setup mapping of lower 1st MB */
+ for (i = 0; i < NR_FIX_ISAMAPS; i++)
+ if (xen_start_info.flags & SIF_PRIVILEGED)
+ set_fixmap_ma(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
+ else
+ set_fixmap_ma_ro(FIX_ISAMAP_BEGIN - i,
+ virt_to_machine(empty_zero_page));
+#endif
+}
+
+/*
+ * Test if the WP bit works in supervisor mode. It isn't supported on 386's
+ * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
+ * used to involve black magic jumps to work around some nasty CPU bugs,
+ * but fortunately the switch to using exceptions got rid of all that.
+ */
+
+void __init test_wp_bit(void)
+{
+ printk("Checking if this processor honours the WP bit even in supervisor mode... ");
+
+ /* Any page-aligned address will do, the test is non-destructive */
+ __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
+ boot_cpu_data.wp_works_ok = do_test_wp_bit();
+ clear_fixmap(FIX_WP_TEST);
+
+ if (!boot_cpu_data.wp_works_ok) {
+ printk("No.\n");
+#ifdef CONFIG_X86_WP_WORKS_OK
+ panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
+#endif
+ } else {
+ printk("Ok.\n");
+ }
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+static void __init set_max_mapnr_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+ highmem_start_page = pfn_to_page(highstart_pfn);
+ max_mapnr = num_physpages = highend_pfn;
+#else
+ max_mapnr = num_physpages = max_low_pfn;
+#endif
+}
+#define __free_all_bootmem() free_all_bootmem()
+#else
+#define __free_all_bootmem() free_all_bootmem_node(NODE_DATA(0))
+extern void set_max_mapnr_init(void);
+#endif /* !CONFIG_DISCONTIGMEM */
+
+static struct kcore_list kcore_mem, kcore_vmalloc;
+
+void __init mem_init(void)
+{
+ extern int ppro_with_ram_bug(void);
+ int codesize, reservedpages, datasize, initsize;
+ int tmp;
+ int bad_ppro;
+
+#ifndef CONFIG_DISCONTIGMEM
+ if (!mem_map)
+ BUG();
+#endif
+
+ bad_ppro = ppro_with_ram_bug();
+
+#ifdef CONFIG_HIGHMEM
+ /* check that fixmap and pkmap do not overlap */
+ if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
+ printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
+ printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
+ PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
+ BUG();
+ }
+#endif
+
+ set_max_mapnr_init();
+
+#ifdef CONFIG_HIGHMEM
+ high_memory = (void *) __va(highstart_pfn * PAGE_SIZE);
+#else
+ high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+#endif
+
+ /* this will put all low memory onto the freelists */
+ totalram_pages += __free_all_bootmem();
+
+ reservedpages = 0;
+ for (tmp = 0; tmp < max_low_pfn; tmp++)
+ /*
+ * Only count reserved RAM pages
+ */
+ if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
+ reservedpages++;
+
+ set_highmem_pages_init(bad_ppro);
+
+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
+ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
+ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
+ VMALLOC_END-VMALLOC_START);
+
+ printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
+ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ num_physpages << (PAGE_SHIFT-10),
+ codesize >> 10,
+ reservedpages << (PAGE_SHIFT-10),
+ datasize >> 10,
+ initsize >> 10,
+ (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
+ );
+
+#ifdef CONFIG_X86_PAE
+ if (!cpu_has_pae)
+ panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
+#endif
+ if (boot_cpu_data.wp_works_ok < 0)
+ test_wp_bit();
+
+ /*
+ * Subtle. SMP is doing its boot stuff late (because it has to
+ * fork idle threads) - but it also needs low mappings for the
+ * protected-mode entry to work. We zap these entries only after
+ * the WP-bit has been tested.
+ */
+#ifndef CONFIG_SMP
+ zap_low_mappings();
+#endif
+}
+
+kmem_cache_t *pgd_cache;
+kmem_cache_t *pmd_cache;
+kmem_cache_t *pte_cache;
+
+void __init pgtable_cache_init(void)
+{
+ pte_cache = kmem_cache_create("pte",
+ PTRS_PER_PTE*sizeof(pte_t),
+ PTRS_PER_PTE*sizeof(pte_t),
+ 0,
+ pte_ctor,
+ pte_dtor);
+ if (!pte_cache)
+ panic("pgtable_cache_init(): Cannot create pte cache");
+ if (PTRS_PER_PMD > 1) {
+ pmd_cache = kmem_cache_create("pmd",
+ PTRS_PER_PMD*sizeof(pmd_t),
+ PTRS_PER_PMD*sizeof(pmd_t),
+ 0,
+ pmd_ctor,
+ NULL);
+ if (!pmd_cache)
+ panic("pgtable_cache_init(): cannot create pmd cache");
+ }
+ pgd_cache = kmem_cache_create("pgd",
+ PTRS_PER_PGD*sizeof(pgd_t),
+ PTRS_PER_PGD*sizeof(pgd_t),
+ 0,
+ pgd_ctor,
+ pgd_dtor);
+ if (!pgd_cache)
+ panic("pgtable_cache_init(): Cannot create pgd cache");
+}
+
+/*
+ * This function cannot be __init, since exceptions don't work in that
+ * section. Put this after the callers, so that it cannot be inlined.
+ */
+static int noinline do_test_wp_bit(void)
+{
+ char tmp_reg;
+ int flag;
+
+ __asm__ __volatile__(
+ " movb %0,%1 \n"
+ "1: movb %1,%0 \n"
+ " xorl %2,%2 \n"
+ "2: \n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4 \n"
+ " .long 1b,2b \n"
+ ".previous \n"
+ :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
+ "=q" (tmp_reg),
+ "=r" (flag)
+ :"2" (1)
+ :"memory");
+
+ return flag;
+}
+
+void free_initmem(void)
+{
+ unsigned long addr;
+
+ addr = (unsigned long)(&__init_begin);
+ for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
+ memset((void *)addr, 0xcc, PAGE_SIZE);
+ free_page(addr);
+ totalram_pages++;
+ }
+ printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ if (start < end)
+ printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+ for (; start < end; start += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(start));
+ set_page_count(virt_to_page(start), 1);
+ free_page(start);
+ totalram_pages++;
+ }
+}
+#endif
--- /dev/null
- unsigned long flags)
+/*
+ * arch/i386/mm/ioremap.c
+ *
+ * Re-map IO memory to kernel address space so that we can access it.
+ * This is needed for high PCI addresses that aren't mapped in the
+ * 640k-1MB IO memory area on PC's
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/slab.h>
++#include <linux/module.h>
+#include <asm/io.h>
+#include <asm/fixmap.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
+
+#ifndef CONFIG_XEN_PHYSDEV_ACCESS
+
+void * __ioremap(unsigned long phys_addr, unsigned long size,
- static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
- unsigned long phys_addr, unsigned long flags)
- {
- unsigned long end;
- unsigned long pfn;
-
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
- if (address >= end)
- BUG();
- pfn = phys_addr >> PAGE_SHIFT;
- do {
- if (!pte_none(*pte)) {
- printk("remap_area_pte: page already exists\n");
- BUG();
- }
- set_pte(pte, pfn_pte_ma(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
- _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
- address += PAGE_SIZE;
- pfn++;
- pte++;
- } while (address && (address < end));
- }
-
- static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
- unsigned long phys_addr, unsigned long flags)
- {
- unsigned long end;
-
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
- phys_addr -= address;
- if (address >= end)
- BUG();
- do {
- pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
- if (!pte)
- return -ENOMEM;
- remap_area_pte(pte, address, end - address, address + phys_addr, flags);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address && (address < end));
- return 0;
- }
-
- static int remap_area_pages(unsigned long address, unsigned long phys_addr,
- unsigned long size, unsigned long flags)
- {
- int error;
- pgd_t * dir;
- unsigned long end = address + size;
-
- phys_addr -= address;
- dir = pgd_offset(&init_mm, address);
- flush_cache_all();
- if (address >= end)
- BUG();
- spin_lock(&init_mm.page_table_lock);
- do {
- pmd_t *pmd;
- pmd = pmd_alloc(&init_mm, dir, address);
- error = -ENOMEM;
- if (!pmd)
- break;
- if (remap_area_pmd(pmd, address, end - address,
- phys_addr + address, flags))
- break;
- error = 0;
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- } while (address && (address < end));
- spin_unlock(&init_mm.page_table_lock);
- flush_tlb_all();
- return error;
- }
-
++ unsigned long flags)
+{
+ return NULL;
+}
+
+void *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+{
+ return NULL;
+}
+
+void iounmap(volatile void __iomem *addr)
+{
+}
+
+void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
+{
+ return NULL;
+}
+
+void __init bt_iounmap(void *addr, unsigned long size)
+{
+}
+
+#else
+
+/*
+ * Does @address reside within a non-highmem page that is local to this virtual
+ * machine (i.e., not an I/O page, nor a memory page belonging to another VM)?
+ * See the comment that accompanies pte_pfn() in pgtable-2level.h to understand
+ * why this works.
+ */
+static inline int is_local_lowmem(unsigned long address)
+{
+ extern unsigned long max_low_pfn;
+ unsigned long mfn = address >> PAGE_SHIFT;
+ unsigned long pfn = mfn_to_pfn(mfn);
+ return ((pfn < max_low_pfn) && (pfn_to_mfn(pfn) == mfn));
+}
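+
+/*
+ * Example: for an I/O page, or a frame belonging to another domain,
+ * there is no local pfn, so mfn_to_pfn(mfn) yields a bogus value and
+ * the pfn_to_mfn(pfn) == mfn round trip fails. For local lowmem the
+ * round trip succeeds and the pfn is below max_low_pfn.
+ */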
+
- if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
+/*
+ * Generic mapping function (not visible outside):
+ */
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+{
+ void __iomem * addr;
+ struct vm_struct * area;
+ unsigned long offset, last_addr;
+
+ /* Don't allow wraparound or zero size */
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr)
+ return NULL;
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ /*
+ * Don't remap the low PCI/ISA area, it's always mapped..
+ */
+ if (phys_addr >= 0x0 && last_addr < 0x100000)
+ return isa_bus_to_virt(phys_addr);
+#endif
+
+ /*
+ * Don't allow anybody to remap normal RAM that we're using..
+ */
+ if (is_local_lowmem(phys_addr)) {
+ char *t_addr, *t_end;
+ struct page *page;
+
+ t_addr = bus_to_virt(phys_addr);
+ t_end = t_addr + (size - 1);
+
+ for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
+ if(!PageReserved(page))
+ return NULL;
+ }
+
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(last_addr+1) - phys_addr;
+
+ /*
+ * Ok, go for it..
+ */
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+ area->phys_addr = phys_addr;
+ addr = (void __iomem *) area->addr;
- pte_t *pte = pte_alloc_map(mm, pmd, address);
++ if (direct_remap_area_pages(&init_mm, (unsigned long) addr, phys_addr, size, __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | flags), DOMID_IO)) {
+ vunmap((void __force *) addr);
+ return NULL;
+ }
+ return (void __iomem *) (offset + (char __iomem *)addr);
+}
+
+
+/**
+ * ioremap_nocache - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap_nocache performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked uncachable
+ * on the CPU as well as honouring existing caching rules from things like
+ * the PCI bus. Note that there are other caches and buffers on many
+ * busses. In particular, driver authors should read up on PCI writes.
+ *
+ * It's useful if some control registers are in such an area and
+ * write combining or read caching is not desirable:
+ *
+ * Must be freed with iounmap.
+ */
+
+void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+{
+ unsigned long last_addr;
+ void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
+ if (!p)
+ return p;
+
+ /* Guaranteed to be > phys_addr, as per __ioremap() */
+ last_addr = phys_addr + size - 1;
+
+ if (is_local_lowmem(last_addr)) {
+ struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
+ unsigned long npages;
+
+ phys_addr &= PAGE_MASK;
+
+ /* This might overflow and become zero.. */
+ last_addr = PAGE_ALIGN(last_addr);
+
+ /* .. but that's ok, because modulo-2**n arithmetic will make
+ * the page-aligned "last - first" come out right.
+ */
+ npages = (last_addr - phys_addr) >> PAGE_SHIFT;
+
+ if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
+ iounmap(p);
+ p = NULL;
+ }
+ global_flush_tlb();
+ }
+
+ return p;
+}
+
+void iounmap(volatile void __iomem *addr)
+{
+ struct vm_struct *p;
+ if ((void __force *) addr <= high_memory)
+ return;
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
+ return;
+#endif
+ p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
+ if (!p) {
+ printk("__iounmap: bad address %p\n", addr);
+ return;
+ }
+
+ if (p->flags && is_local_lowmem(p->phys_addr)) {
+ change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
+ p->size >> PAGE_SHIFT,
+ PAGE_KERNEL);
+ global_flush_tlb();
+ }
+ kfree(p);
+}
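+
+/*
+ * Typical driver usage (sketch; 'bar' and CTRL_REG are hypothetical):
+ *
+ * void __iomem *regs = ioremap_nocache(bar, 0x1000);
+ * if (regs == NULL)
+ * return -ENOMEM;
+ * writel(1, regs + CTRL_REG);
+ * iounmap(regs);
+ */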
+
+void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
+{
+ unsigned long offset, last_addr;
+ unsigned int nrpages;
+ enum fixed_addresses idx;
+
+ /* Don't allow wraparound or zero size */
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr)
+ return NULL;
+
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ /*
+ * Don't remap the low PCI/ISA area, it's always mapped..
+ */
+ if (phys_addr >= 0x0 && last_addr < 0x100000)
+ return isa_bus_to_virt(phys_addr);
+#endif
+
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(last_addr) - phys_addr;
+
+ /*
+ * Mappings have to fit in the FIX_BTMAP area.
+ */
+ nrpages = size >> PAGE_SHIFT;
+ if (nrpages > NR_FIX_BTMAPS)
+ return NULL;
+
+ /*
+ * Ok, go for it..
+ */
+ idx = FIX_BTMAP_BEGIN;
+ while (nrpages > 0) {
+ set_fixmap_ma(idx, phys_addr);
+ phys_addr += PAGE_SIZE;
+ --idx;
+ --nrpages;
+ }
+ return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
+}
+
+void __init bt_iounmap(void *addr, unsigned long size)
+{
+ unsigned long virt_addr;
+ unsigned long offset;
+ unsigned int nrpages;
+ enum fixed_addresses idx;
+
+ virt_addr = (unsigned long)addr;
+ if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
+ return;
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
+ return;
+#endif
+ offset = virt_addr & ~PAGE_MASK;
+ nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
+
+ idx = FIX_BTMAP_BEGIN;
+ while (nrpages > 0) {
+ clear_fixmap(idx);
+ --idx;
+ --nrpages;
+ }
+}
+
+#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
+
+/* These hacky macros avoid phys->machine translations. */
+#define __direct_pte(x) ((pte_t) { (x) } )
+#define __direct_mk_pte(page_nr,pgprot) \
+ __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
+#define direct_mk_pte_phys(physpage, pgprot) \
+ __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
+
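+/*
+ * The remap works in two passes over each batch of mmu_update_t
+ * entries: direct_remap_area_pages() fills in the new PTE values
+ * (machine address plus protection bits), then
+ * __direct_remap_area_pages() walks the page tables and fills in the
+ * machine addresses of the PTEs themselves before the batch is handed
+ * to HYPERVISOR_mmu_update(). The first entry in each batch selects
+ * the foreign domain via MMUEXT_SET_FOREIGNDOM.
+ */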
+static inline void direct_remap_area_pte(pte_t *pte,
+ unsigned long address,
+ unsigned long size,
+ mmu_update_t **v)
+{
+ unsigned long end;
+
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ if (address >= end)
+ BUG();
+
+ do {
+ (*v)->ptr = virt_to_machine(pte);
+ (*v)++;
+ address += PAGE_SIZE;
+ pte++;
+ } while (address && (address < end));
+}
+
+static inline int direct_remap_area_pmd(struct mm_struct *mm,
+ pmd_t *pmd,
+ unsigned long address,
+ unsigned long size,
+ mmu_update_t **v)
+{
+ unsigned long end;
+
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ if (address >= end)
+ BUG();
+ do {
- flush_cache_all();
++ pte_t *pte = (mm == &init_mm) ?
++ pte_alloc_kernel(mm, pmd, address) :
++ pte_alloc_map(mm, pmd, address);
+ if (!pte)
+ return -ENOMEM;
+ direct_remap_area_pte(pte, address, end - address, v);
+ pte_unmap(pte);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address && (address < end));
+ return 0;
+}
+
+int __direct_remap_area_pages(struct mm_struct *mm,
+ unsigned long address,
+ unsigned long size,
+ mmu_update_t *v)
+{
+ pgd_t * dir;
+ unsigned long end = address + size;
+
+ dir = pgd_offset(mm, address);
- flush_tlb_all();
+ if (address >= end)
+ BUG();
+ spin_lock(&mm->page_table_lock);
+ do {
+ pmd_t *pmd = pmd_alloc(mm, dir, address);
+ if (!pmd)
+ return -ENOMEM;
+ direct_remap_area_pmd(mm, pmd, address, end - address, &v);
+ address = (address + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+
+ } while (address && (address < end));
+ spin_unlock(&mm->page_table_lock);
- for(i = 0; i < size; i += PAGE_SIZE) {
+ return 0;
+}
+
+
+int direct_remap_area_pages(struct mm_struct *mm,
+ unsigned long address,
+ unsigned long machine_addr,
+ unsigned long size,
+ pgprot_t prot,
+ domid_t domid)
+{
+ int i;
+ unsigned long start_address;
+#define MAX_DIRECTMAP_MMU_QUEUE 130
+ mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
+
+ u[0].ptr = MMU_EXTENDED_COMMAND;
+ u[0].val = MMUEXT_SET_FOREIGNDOM;
+ u[0].val |= (unsigned long)domid << 16;
+ v = w = &u[1];
+
+ start_address = address;
+
-
++ flush_cache_all();
++
++ for (i = 0; i < size; i += PAGE_SIZE) {
+ if ((v - u) == MAX_DIRECTMAP_MMU_QUEUE) {
+ /* Fill in the PTE pointers. */
+ __direct_remap_area_pages(mm,
+ start_address,
+ address-start_address,
+ w);
- return -EFAULT;
++
+ if (HYPERVISOR_mmu_update(u, v - u, NULL) < 0)
- w);
++ return -EFAULT;
+ v = w;
+ start_address = address;
+ }
+
+ /*
+ * Fill in the machine address: PTE ptr is done later by
+ * __direct_remap_area_pages().
+ */
+ v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot);
+
+ machine_addr += PAGE_SIZE;
+ address += PAGE_SIZE;
+ v++;
+ }
+
+ if (v != w) {
+ /* get the ptep's filled in */
+ __direct_remap_area_pages(mm,
+ start_address,
+ address-start_address,
- return -EFAULT;
++ w);
+ if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL) < 0))
-
++ return -EFAULT;
+ }
++
++ flush_tlb_all();
++
+ return 0;
+}
++
++EXPORT_SYMBOL(direct_remap_area_pages);
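+
+/*
+ * Example (hypothetical privileged-domain caller): map 'nr' frames of
+ * another domain's memory, starting at machine address 'maddr', into a
+ * previously set up vma.
+ *
+ * rc = direct_remap_area_pages(vma->vm_mm, vma->vm_start, maddr,
+ * nr << PAGE_SHIFT, vma->vm_page_prot,
+ * domid);
+ */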
--- /dev/null
- obj-y := ctrl_if.o evtchn.o fixup.o reboot.o xen_proc.o gnttab.o skbuff.o
+#
+# Makefile for the linux kernel.
+#
+
+XENARCH := $(subst ",,$(CONFIG_XENARCH))
+
+CPPFLAGS_vmlinux.lds += -U$(XENARCH)
+
+$(obj)/vmlinux.lds.S:
+ @ln -fsn $(srctree)/arch/$(XENARCH)/kernel/vmlinux.lds.S $@
+
+extra-y += vmlinux.lds
+
++obj-y := ctrl_if.o evtchn.o fixup.o reboot.o xen_proc.o gnttab.o skbuff.o devmem.o
--- /dev/null
+/******************************************************************************
+ * ctrl_if.c
+ *
+ * Management functions for special interface to the domain controller.
+ *
+ * Copyright (c) 2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
++#include <linux/module.h>
+#include <asm-xen/ctrl_if.h>
+#include <asm-xen/evtchn.h>
+
+#if 0
+#define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
+ __FILE__ , __LINE__ , ## _a )
+#else
+#define DPRINTK(_f, _a...) ((void)0)
+#endif
+
+/*
+ * Only used by initial domain which must create its own control-interface
+ * event channel. This value is picked up by the user-space domain controller
+ * via an ioctl.
+ */
+int initdom_ctrlif_domcontroller_port = -1;
+
+static int ctrl_if_evtchn;
+static int ctrl_if_irq;
+static spinlock_t ctrl_if_lock;
+
+static struct irqaction ctrl_if_irq_action;
+
+static CONTROL_RING_IDX ctrl_if_tx_resp_cons;
+static CONTROL_RING_IDX ctrl_if_rx_req_cons;
+
+/* Incoming message requests. */
+ /* Primary message type -> message handler. */
+static ctrl_msg_handler_t ctrl_if_rxmsg_handler[256];
+ /* Primary message type -> callback in process context? */
+static unsigned long ctrl_if_rxmsg_blocking_context[256/sizeof(unsigned long)];
+ /* Is it late enough during bootstrap to use schedule_task()? */
+static int safe_to_schedule_task;
+ /* Queue up messages to be handled in process context. */
+static ctrl_msg_t ctrl_if_rxmsg_deferred[CONTROL_RING_SIZE];
+static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_prod;
+static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_cons;
+
+/* Incoming message responses: message identifier -> message handler/id. */
+static struct {
+ ctrl_msg_handler_t fn;
+ unsigned long id;
+} ctrl_if_txmsg_id_mapping[CONTROL_RING_SIZE];
+
+/* For received messages that must be deferred to process context. */
+static void __ctrl_if_rxmsg_deferred(void *unused);
+static DECLARE_WORK(ctrl_if_rxmsg_deferred_work,
+ __ctrl_if_rxmsg_deferred,
+ NULL);
+
+/* Deferred callbacks for people waiting for space in the transmit ring. */
+static DECLARE_TASK_QUEUE(ctrl_if_tx_tq);
+
+static DECLARE_WAIT_QUEUE_HEAD(ctrl_if_tx_wait);
+static void __ctrl_if_tx_tasklet(unsigned long data);
+static DECLARE_TASKLET(ctrl_if_tx_tasklet, __ctrl_if_tx_tasklet, 0);
+
+static void __ctrl_if_rx_tasklet(unsigned long data);
+static DECLARE_TASKLET(ctrl_if_rx_tasklet, __ctrl_if_rx_tasklet, 0);
+
+#define get_ctrl_if() ((control_if_t *)((char *)HYPERVISOR_shared_info + 2048))
+#define TX_FULL(_c) \
+ (((_c)->tx_req_prod - ctrl_if_tx_resp_cons) == CONTROL_RING_SIZE)
+
+static void ctrl_if_notify_controller(void)
+{
+ notify_via_evtchn(ctrl_if_evtchn);
+}
+
+static void ctrl_if_rxmsg_default_handler(ctrl_msg_t *msg, unsigned long id)
+{
+ msg->length = 0;
+ ctrl_if_send_response(msg);
+}
+
+static void __ctrl_if_tx_tasklet(unsigned long data)
+{
+ control_if_t *ctrl_if = get_ctrl_if();
+ ctrl_msg_t *msg;
+ int was_full = TX_FULL(ctrl_if);
+ CONTROL_RING_IDX rp;
+
+ rp = ctrl_if->tx_resp_prod;
+ rmb(); /* Ensure we see all requests up to 'rp'. */
+
+ while ( ctrl_if_tx_resp_cons != rp )
+ {
+ msg = &ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if_tx_resp_cons)];
+
+ DPRINTK("Rx-Rsp %u/%u :: %d/%d\n",
+ ctrl_if_tx_resp_cons,
+ ctrl_if->tx_resp_prod,
+ msg->type, msg->subtype);
+
+ /* Execute the callback handler, if one was specified. */
+ if ( msg->id != 0xFF )
+ {
+ (*ctrl_if_txmsg_id_mapping[msg->id].fn)(
+ msg, ctrl_if_txmsg_id_mapping[msg->id].id);
+ smp_mb(); /* Execute, /then/ free. */
+ ctrl_if_txmsg_id_mapping[msg->id].fn = NULL;
+ }
+
+ /*
+ * Step over the message in the ring /after/ finishing reading it. As
+ * soon as the index is updated, the message may get blown away.
+ */
+ smp_mb();
+ ctrl_if_tx_resp_cons++;
+ }
+
+ if ( was_full && !TX_FULL(ctrl_if) )
+ {
+ wake_up(&ctrl_if_tx_wait);
+ run_task_queue(&ctrl_if_tx_tq);
+ }
+}
+
+static void __ctrl_if_rxmsg_deferred(void *unused)
+{
+ ctrl_msg_t *msg;
+ CONTROL_RING_IDX dp;
+
+ dp = ctrl_if_rxmsg_deferred_prod;
+ rmb(); /* Ensure we see all deferred requests up to 'dp'. */
+
+ while ( ctrl_if_rxmsg_deferred_cons != dp )
+ {
+ msg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
+ ctrl_if_rxmsg_deferred_cons++)];
+ (*ctrl_if_rxmsg_handler[msg->type])(msg, 0);
+ }
+}
+
+static void __ctrl_if_rx_tasklet(unsigned long data)
+{
+ control_if_t *ctrl_if = get_ctrl_if();
+ ctrl_msg_t msg, *pmsg;
+ CONTROL_RING_IDX rp, dp;
+
+ dp = ctrl_if_rxmsg_deferred_prod;
+ rp = ctrl_if->rx_req_prod;
+ rmb(); /* Ensure we see all requests up to 'rp'. */
+
+ while ( ctrl_if_rx_req_cons != rp )
+ {
+ pmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if_rx_req_cons++)];
+ memcpy(&msg, pmsg, offsetof(ctrl_msg_t, msg));
+
+ DPRINTK("Rx-Req %u/%u :: %d/%d\n",
+ ctrl_if_rx_req_cons-1,
+ ctrl_if->rx_req_prod,
+ msg.type, msg.subtype);
+
+ if ( msg.length != 0 )
+ memcpy(msg.msg, pmsg->msg, msg.length);
+
+ if ( test_bit(msg.type,
+ (unsigned long *)&ctrl_if_rxmsg_blocking_context) )
+ memcpy(&ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(dp++)],
+ &msg, offsetof(ctrl_msg_t, msg) + msg.length);
+ else
+ (*ctrl_if_rxmsg_handler[msg.type])(&msg, 0);
+ }
+
+ if ( dp != ctrl_if_rxmsg_deferred_prod )
+ {
+ wmb();
+ ctrl_if_rxmsg_deferred_prod = dp;
+ schedule_work(&ctrl_if_rxmsg_deferred_work);
+ }
+}
+
+static irqreturn_t ctrl_if_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ control_if_t *ctrl_if = get_ctrl_if();
+
+ if ( ctrl_if_tx_resp_cons != ctrl_if->tx_resp_prod )
+ tasklet_schedule(&ctrl_if_tx_tasklet);
+
+ if ( ctrl_if_rx_req_cons != ctrl_if->rx_req_prod )
+ tasklet_schedule(&ctrl_if_rx_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+int
+ctrl_if_send_message_noblock(
+ ctrl_msg_t *msg,
+ ctrl_msg_handler_t hnd,
+ unsigned long id)
+{
+ control_if_t *ctrl_if = get_ctrl_if();
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ctrl_if_lock, flags);
+
+ if ( TX_FULL(ctrl_if) )
+ {
+ spin_unlock_irqrestore(&ctrl_if_lock, flags);
+ return -EAGAIN;
+ }
+
+ msg->id = 0xFF;
+ if ( hnd != NULL )
+ {
+ for ( i = 0; ctrl_if_txmsg_id_mapping[i].fn != NULL; i++ )
+ continue;
+ ctrl_if_txmsg_id_mapping[i].fn = hnd;
+ ctrl_if_txmsg_id_mapping[i].id = id;
+ msg->id = i;
+ }
+
+ DPRINTK("Tx-Req %u/%u :: %d/%d\n",
+ ctrl_if->tx_req_prod,
+ ctrl_if_tx_resp_cons,
+ msg->type, msg->subtype);
+
+ memcpy(&ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if->tx_req_prod)],
+ msg, sizeof(*msg));
+ wmb(); /* Write the message before letting the controller peek at it. */
+ ctrl_if->tx_req_prod++;
+
+ spin_unlock_irqrestore(&ctrl_if_lock, flags);
+
+ ctrl_if_notify_controller();
+
+ return 0;
+}
+
+int
+ctrl_if_send_message_block(
+ ctrl_msg_t *msg,
+ ctrl_msg_handler_t hnd,
+ unsigned long id,
+ long wait_state)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int rc;
+
+ /* Fast path. */
+ if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != -EAGAIN )
+ return rc;
+
+ add_wait_queue(&ctrl_if_tx_wait, &wait);
+
+ for ( ; ; )
+ {
+ set_current_state(wait_state);
+
+ if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != -EAGAIN )
+ break;
+
+ rc = -ERESTARTSYS;
+ if ( signal_pending(current) && (wait_state == TASK_INTERRUPTIBLE) )
+ break;
+
+ schedule();
+ }
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&ctrl_if_tx_wait, &wait);
+
+ return rc;
+}
+
+/* Allow a response-callback handler to find context of a blocked requester. */
+struct rsp_wait {
+ ctrl_msg_t *msg; /* Buffer for the response message. */
+ struct task_struct *task; /* The task that is blocked on the response. */
+ int done; /* Indicates to 'task' that the response has been received. */
+};
+
+static void __ctrl_if_get_response(ctrl_msg_t *msg, unsigned long id)
+{
+ struct rsp_wait *wait = (struct rsp_wait *)id;
+ struct task_struct *task = wait->task;
+
+ memcpy(wait->msg, msg, sizeof(*msg));
+ wmb();
+ wait->done = 1;
+
+ wake_up_process(task);
+}
+
+int
+ctrl_if_send_message_and_get_response(
+ ctrl_msg_t *msg,
+ ctrl_msg_t *rmsg,
+ long wait_state)
+{
+ struct rsp_wait wait;
+ int rc;
+
+ wait.msg = rmsg;
+ wait.done = 0;
+ wait.task = current;
+
+ if ( (rc = ctrl_if_send_message_block(msg, __ctrl_if_get_response,
+ (unsigned long)&wait,
+ wait_state)) != 0 )
+ return rc;
+
+ for ( ; ; )
+ {
+ /* NB. Can't easily support TASK_INTERRUPTIBLE here. */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if ( wait.done )
+ break;
+ schedule();
+ }
+
+ set_current_state(TASK_RUNNING);
+ return 0;
+}
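The blocking send/response pair above is the usual way a frontend issues a
synchronous request. A minimal sketch, with placeholder type/subtype values
and no payload (hypothetical, not part of this changeset):

    /* Send an empty request of the given type and sleep until the
     * controller's response has been copied into 'rsp'. */
    static int example_sync_request(u8 type, u8 subtype, ctrl_msg_t *rsp)
    {
        ctrl_msg_t req;

        memset(&req, 0, sizeof(req));
        req.type    = type;
        req.subtype = subtype;
        req.length  = 0;

        return ctrl_if_send_message_and_get_response(
            &req, rsp, TASK_UNINTERRUPTIBLE);
    }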
+
+int
+ctrl_if_enqueue_space_callback(
+ struct tq_struct *task)
+{
+ control_if_t *ctrl_if = get_ctrl_if();
+
+ /* Fast path. */
+ if ( !TX_FULL(ctrl_if) )
+ return 0;
+
+ (void)queue_task(task, &ctrl_if_tx_tq);
+
+ /*
+ * We may race execution of the task queue, so return the re-checked
+ * status. If the task has not yet executed despite the ring being
+ * non-full then we will certainly return 'not full' here.
+ */
+ smp_mb();
+ return TX_FULL(ctrl_if);
+}
+
+void
+ctrl_if_send_response(
+ ctrl_msg_t *msg)
+{
+ control_if_t *ctrl_if = get_ctrl_if();
+ unsigned long flags;
+ ctrl_msg_t *dmsg;
+
+ /*
+ * NB. The response may be the original request message, modified in-place.
+ * In this situation we may have src==dst, so no copying is required.
+ */
+ spin_lock_irqsave(&ctrl_if_lock, flags);
+
+ DPRINTK("Tx-Rsp %u :: %d/%d\n",
+ ctrl_if->rx_resp_prod,
+ msg->type, msg->subtype);
+
+ dmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if->rx_resp_prod)];
+ if ( dmsg != msg )
+ memcpy(dmsg, msg, sizeof(*msg));
+
+ wmb(); /* Write the message before letting the controller peek at it. */
+ ctrl_if->rx_resp_prod++;
+
+ spin_unlock_irqrestore(&ctrl_if_lock, flags);
+
+ ctrl_if_notify_controller();
+}
+
+int
+ctrl_if_register_receiver(
+ u8 type,
+ ctrl_msg_handler_t hnd,
+ unsigned int flags)
+{
+ unsigned long _flags;
+ int inuse;
+
+ spin_lock_irqsave(&ctrl_if_lock, _flags);
+
+ inuse = (ctrl_if_rxmsg_handler[type] != ctrl_if_rxmsg_default_handler);
+
+ if ( inuse )
+ {
+ printk(KERN_INFO "Receiver %p already established for control "
+ "messages of type %d.\n", ctrl_if_rxmsg_handler[type], type);
+ }
+ else
+ {
+ ctrl_if_rxmsg_handler[type] = hnd;
+ clear_bit(type, (unsigned long *)&ctrl_if_rxmsg_blocking_context);
+ if ( flags == CALLBACK_IN_BLOCKING_CONTEXT )
+ {
+ set_bit(type, (unsigned long *)&ctrl_if_rxmsg_blocking_context);
+ if ( !safe_to_schedule_task )
+ BUG();
+ }
+ }
+
+ spin_unlock_irqrestore(&ctrl_if_lock, _flags);
+
+ return !inuse;
+}
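A receiver registered this way must send a response for every request it
consumes, since the ring protocol pairs them one-to-one. A minimal sketch
(hypothetical handler; CMSG_MEM_REQUEST is merely a convenient real type):

    /* Handle one message type in process context and acknowledge it. */
    static void example_rx_handler(ctrl_msg_t *msg, unsigned long id)
    {
        /* ... interpret msg->msg[] here ... */
        msg->length = 0;
        ctrl_if_send_response(msg);
    }

    static void example_register(void)
    {
        (void)ctrl_if_register_receiver(CMSG_MEM_REQUEST, example_rx_handler,
                                        CALLBACK_IN_BLOCKING_CONTEXT);
    }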
+
+void
+ctrl_if_unregister_receiver(
+ u8 type,
+ ctrl_msg_handler_t hnd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl_if_lock, flags);
+
+ if ( ctrl_if_rxmsg_handler[type] != hnd )
+ printk(KERN_INFO "Receiver %p is not registered for control "
+ "messages of type %d.\n", hnd, type);
+ else
+ ctrl_if_rxmsg_handler[type] = ctrl_if_rxmsg_default_handler;
+
+ spin_unlock_irqrestore(&ctrl_if_lock, flags);
+
+ /* Ensure that @hnd will not be executed after this function returns. */
+ tasklet_unlock_wait(&ctrl_if_rx_tasklet);
+}
+
+void ctrl_if_suspend(void)
+{
+ teardown_irq(ctrl_if_irq, &ctrl_if_irq_action);
+ unbind_evtchn_from_irq(ctrl_if_evtchn);
+}
+
+void ctrl_if_resume(void)
+{
+ control_if_t *ctrl_if = get_ctrl_if();
+
+ if ( xen_start_info.flags & SIF_INITDOMAIN )
+ {
+ /*
+ * The initial domain must create its own domain-controller link.
+ * The controller is probably not running at this point, but will
+ * pick up its end of the event channel from
+ * initdom_ctrlif_domcontroller_port (exported to user space via an
+ * ioctl) when it starts.
+ */
+ evtchn_op_t op;
+ op.cmd = EVTCHNOP_bind_interdomain;
+ op.u.bind_interdomain.dom1 = DOMID_SELF;
+ op.u.bind_interdomain.dom2 = DOMID_SELF;
+ op.u.bind_interdomain.port1 = 0;
+ op.u.bind_interdomain.port2 = 0;
+ if ( HYPERVISOR_event_channel_op(&op) != 0 )
+ BUG();
+ xen_start_info.domain_controller_evtchn = op.u.bind_interdomain.port1;
+ initdom_ctrlif_domcontroller_port = op.u.bind_interdomain.port2;
+ }
+
+ /* Sync up with shared indexes. */
+ ctrl_if_tx_resp_cons = ctrl_if->tx_resp_prod;
+ ctrl_if_rx_req_cons = ctrl_if->rx_resp_prod;
+
+ ctrl_if_evtchn = xen_start_info.domain_controller_evtchn;
+ ctrl_if_irq = bind_evtchn_to_irq(ctrl_if_evtchn);
+
+ memset(&ctrl_if_irq_action, 0, sizeof(ctrl_if_irq_action));
+ ctrl_if_irq_action.handler = ctrl_if_interrupt;
+ ctrl_if_irq_action.name = "ctrl-if";
+ (void)setup_irq(ctrl_if_irq, &ctrl_if_irq_action);
+}
+
+void __init ctrl_if_init(void)
+{
+ int i;
+
+ for ( i = 0; i < 256; i++ )
+ ctrl_if_rxmsg_handler[i] = ctrl_if_rxmsg_default_handler;
+
+ spin_lock_init(&ctrl_if_lock);
+
+ ctrl_if_resume();
+}
+
+
+/* This is called after it is safe to call schedule_task(). */
+static int __init ctrl_if_late_setup(void)
+{
+ safe_to_schedule_task = 1;
+ return 0;
+}
+__initcall(ctrl_if_late_setup);
+
+
+/*
+ * !! The following are DANGEROUS FUNCTIONS !!
+ * Use with care [for example, see xencons_force_flush()].
+ */
+
+int ctrl_if_transmitter_empty(void)
+{
+ return (get_ctrl_if()->tx_req_prod == ctrl_if_tx_resp_cons);
+}
+
+void ctrl_if_discard_responses(void)
+{
+ ctrl_if_tx_resp_cons = get_ctrl_if()->tx_resp_prod;
+}
+
++EXPORT_SYMBOL(ctrl_if_send_message_noblock);
++EXPORT_SYMBOL(ctrl_if_send_message_block);
++EXPORT_SYMBOL(ctrl_if_send_message_and_get_response);
++EXPORT_SYMBOL(ctrl_if_enqueue_space_callback);
++EXPORT_SYMBOL(ctrl_if_send_response);
++EXPORT_SYMBOL(ctrl_if_register_receiver);
++EXPORT_SYMBOL(ctrl_if_unregister_receiver);
--- /dev/null
- #ifdef CONFIG_XEN
- if (file->f_flags & O_SYNC)
- return 1;
- /* Xen sets correct MTRR type on non-RAM for us. */
- return 0;
- #elif defined(__i386__)
+/*
+ * linux/drivers/char/mem.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * Added devfs support.
+ * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
+ * Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mman.h>
+#include <linux/random.h>
+#include <linux/init.h>
+#include <linux/raw.h>
+#include <linux/tty.h>
+#include <linux/capability.h>
+#include <linux/smp_lock.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/ptrace.h>
+#include <linux/device.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_IA64
+# include <linux/efi.h>
+#endif
+
+#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
+extern void tapechar_init(void);
+#endif
+
+/*
+ * Architectures vary in how they handle caching for addresses
+ * outside of main memory.
+ */
+static inline int uncached_access(struct file *file, unsigned long addr)
+{
-
++#if defined(__i386__)
+ /*
+ * On the PPro and successors, the MTRRs are used to set
+ * memory types for physical addresses outside main memory,
+ * so blindly setting PCD or PWT on those pages is wrong.
+ * For Pentiums and earlier, the surround logic should disable
+ * caching for the high addresses through the KEN pin, but
+ * we maintain the tradition of paranoia in this code.
+ */
+ if (file->f_flags & O_SYNC)
+ return 1;
+ return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
+ test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
+ test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
+ test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
+ && addr >= __pa(high_memory);
+#elif defined(__x86_64__)
+ /*
+ * This is broken because it can generate memory type aliases,
+ * which can cause cache corruptions
+ * But it is only available for root and we have to be bug-to-bug
+ * compatible with i386.
+ */
+ if (file->f_flags & O_SYNC)
+ return 1;
+ /* same behaviour as i386. PAT always set to cached and MTRRs control the
+ caching behaviour.
+ Hopefully a full PAT implementation will fix that soon. */
+ return 0;
+#elif defined(CONFIG_IA64)
+ /*
+ * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
+ */
+ return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
+#elif defined(CONFIG_PPC64)
+ /* On PPC64, we always do non-cacheable access to the IO hole and
+ * cacheable elsewhere. Cache paradox can checkstop the CPU and
+ * the high_memory heuristic below is wrong on machines with memory
+ * above the IO hole... Ah, and of course, XFree86 doesn't pass
+ * O_SYNC when mapping us to tap IO space. Surprised ?
+ */
+ return !page_is_ram(addr >> PAGE_SHIFT);
+#else
+ /*
+ * Accessing memory above the top of memory the kernel knows about, or
+ * through a file pointer that was marked O_SYNC, will be done non-cached.
+ */
+ if (file->f_flags & O_SYNC)
+ return 1;
+ return addr >= __pa(high_memory);
+#endif
+}
+
+#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
+static inline int valid_phys_addr_range(unsigned long addr, size_t *count)
+{
+ unsigned long end_mem;
+
+ end_mem = __pa(high_memory);
+ if (addr >= end_mem)
+ return 0;
+
+ if (*count > end_mem - addr)
+ *count = end_mem - addr;
+
+ return 1;
+}
+#endif
+
+static ssize_t do_write_mem(void *p, unsigned long realp,
+ const char __user * buf, size_t count, loff_t *ppos)
+{
+ ssize_t written;
+ unsigned long copied;
+
+ written = 0;
+#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
+ /* we don't have page 0 mapped on sparc and m68k.. */
+ if (realp < PAGE_SIZE) {
+ unsigned long sz = PAGE_SIZE-realp;
+ if (sz > count) sz = count;
+ /* Hmm. Do something? */
+ buf+=sz;
+ p+=sz;
+ count-=sz;
+ written+=sz;
+ }
+#endif
+ copied = copy_from_user(p, buf, count);
+ if (copied) {
+ ssize_t ret = written + (count - copied);
+
+ if (ret)
+ return ret;
+ return -EFAULT;
+ }
+ written += count;
+ *ppos += written;
+ return written;
+}
+
- static int mmap_mem(struct file * file, struct vm_area_struct * vma)
++#ifndef ARCH_HAS_DEV_MEM
+/*
+ * This function reads the *physical* memory. The f_pos points directly to the
+ * memory location.
+ */
+static ssize_t read_mem(struct file * file, char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long p = *ppos;
+ ssize_t read;
+
+ if (!valid_phys_addr_range(p, &count))
+ return -EFAULT;
+ read = 0;
+#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
+ /* we don't have page 0 mapped on sparc and m68k.. */
+ if (p < PAGE_SIZE) {
+ unsigned long sz = PAGE_SIZE-p;
+ if (sz > count)
+ sz = count;
+ if (sz > 0) {
+ if (clear_user(buf, sz))
+ return -EFAULT;
+ buf += sz;
+ p += sz;
+ count -= sz;
+ read += sz;
+ }
+ }
+#endif
+ if (copy_to_user(buf, __va(p), count))
+ return -EFAULT;
+ read += count;
+ *ppos += read;
+ return read;
+}
+
+static ssize_t write_mem(struct file * file, const char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long p = *ppos;
+
+ if (!valid_phys_addr_range(p, &count))
+ return -EFAULT;
+ return do_write_mem(__va(p), p, buf, count, ppos);
+}
++#endif
+
- #if defined(CONFIG_XEN)
- if (io_remap_page_range(vma,
- vma->vm_start,
- vma->vm_pgoff << PAGE_SHIFT,
- vma->vm_end-vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
- #else
- /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
- if (remap_pfn_range(vma,
- vma->vm_start,
- vma->vm_pgoff,
- vma->vm_end-vma->vm_start,
- vma->vm_page_prot))
++static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
+{
+#ifdef pgprot_noncached
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ int uncached;
+
+ uncached = uncached_access(file, offset);
+ if (uncached)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+#endif
+
- #endif
++ /* Don't try to swap out physical pages.. */
++ vma->vm_flags |= VM_RESERVED;
++
++ /*
++ * Don't dump addresses that are not real memory to a core file.
++ */
++ if (uncached)
++ vma->vm_flags |= VM_IO;
++
++ if (remap_page_range(vma, vma->vm_start, offset, vma->vm_end-vma->vm_start,
++ vma->vm_page_prot))
+ return -EAGAIN;
- #define mmap_kmem mmap_mem
+ return 0;
+}
+
+extern long vread(char *buf, char *addr, unsigned long count);
+extern long vwrite(char *buf, char *addr, unsigned long count);
+
+/*
+ * This function reads the *virtual* memory as seen by the kernel.
+ */
+static ssize_t read_kmem(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long p = *ppos;
+ ssize_t read = 0;
+ ssize_t virtr = 0;
+ char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
+
+ if (p < (unsigned long) high_memory) {
+ read = count;
+ if (count > (unsigned long) high_memory - p)
+ read = (unsigned long) high_memory - p;
+
+#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
+ /* we don't have page 0 mapped on sparc and m68k.. */
+ if (p < PAGE_SIZE && read > 0) {
+ size_t tmp = PAGE_SIZE - p;
+ if (tmp > read) tmp = read;
+ if (clear_user(buf, tmp))
+ return -EFAULT;
+ buf += tmp;
+ p += tmp;
+ read -= tmp;
+ count -= tmp;
+ }
+#endif
+ if (copy_to_user(buf, (char *)p, read))
+ return -EFAULT;
+ p += read;
+ buf += read;
+ count -= read;
+ }
+
+ if (count > 0) {
+ kbuf = (char *)__get_free_page(GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+ while (count > 0) {
+ int len = count;
+
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+ len = vread(kbuf, (char *)p, len);
+ if (!len)
+ break;
+ if (copy_to_user(buf, kbuf, len)) {
+ free_page((unsigned long)kbuf);
+ return -EFAULT;
+ }
+ count -= len;
+ buf += len;
+ virtr += len;
+ p += len;
+ }
+ free_page((unsigned long)kbuf);
+ }
+ *ppos = p;
+ return virtr + read;
+}
+
+/*
+ * This function writes to the *virtual* memory as seen by the kernel.
+ */
+static ssize_t write_kmem(struct file * file, const char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long p = *ppos;
+ ssize_t wrote = 0;
+ ssize_t virtr = 0;
+ ssize_t written;
+ char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
+
+ if (p < (unsigned long) high_memory) {
+
+ wrote = count;
+ if (count > (unsigned long) high_memory - p)
+ wrote = (unsigned long) high_memory - p;
+
+ written = do_write_mem((void*)p, p, buf, wrote, ppos);
+ if (written != wrote)
+ return written;
+ wrote = written;
+ p += wrote;
+ buf += wrote;
+ count -= wrote;
+ }
+
+ if (count > 0) {
+ kbuf = (char *)__get_free_page(GFP_KERNEL);
+ if (!kbuf)
+ return wrote ? wrote : -ENOMEM;
+ while (count > 0) {
+ int len = count;
+
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+ if (len) {
+ written = copy_from_user(kbuf, buf, len);
+ if (written) {
+ ssize_t ret;
+
+ free_page((unsigned long)kbuf);
+ ret = wrote + virtr + (len - written);
+ return ret ? ret : -EFAULT;
+ }
+ }
+ len = vwrite(kbuf, (char *)p, len);
+ count -= len;
+ buf += len;
+ virtr += len;
+ p += len;
+ }
+ free_page((unsigned long)kbuf);
+ }
+
+ *ppos = p;
+ return virtr + wrote;
+}
+
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
+static ssize_t read_port(struct file * file, char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long i = *ppos;
+ char __user *tmp = buf;
+
+ if (verify_area(VERIFY_WRITE,buf,count))
+ return -EFAULT;
+ while (count-- > 0 && i < 65536) {
+ if (__put_user(inb(i),tmp) < 0)
+ return -EFAULT;
+ i++;
+ tmp++;
+ }
+ *ppos = i;
+ return tmp-buf;
+}
+
+static ssize_t write_port(struct file * file, const char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long i = *ppos;
+ const char __user * tmp = buf;
+
+ if (verify_area(VERIFY_READ,buf,count))
+ return -EFAULT;
+ while (count-- > 0 && i < 65536) {
+ char c;
+ if (__get_user(c, tmp))
+ return -EFAULT;
+ outb(c,i);
+ i++;
+ tmp++;
+ }
+ *ppos = i;
+ return tmp-buf;
+}
+#endif
+
+static ssize_t read_null(struct file * file, char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+
+static ssize_t write_null(struct file * file, const char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ return count;
+}
+
+#ifdef CONFIG_MMU
+/*
+ * For fun, we are using the MMU for this.
+ */
+static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
+{
+ struct mm_struct *mm;
+ struct vm_area_struct * vma;
+ unsigned long addr=(unsigned long)buf;
+
+ mm = current->mm;
+ /* Oops, this was forgotten before. -ben */
+ down_read(&mm->mmap_sem);
+
+ /* For private mappings, just map in zero pages. */
+ for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
+ unsigned long count;
+
+ if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
+ goto out_up;
+ if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
+ break;
+ count = vma->vm_end - addr;
+ if (count > size)
+ count = size;
+
+ zap_page_range(vma, addr, count, NULL);
+ zeromap_page_range(vma, addr, count, PAGE_COPY);
+
+ size -= count;
+ buf += count;
+ addr += count;
+ if (size == 0)
+ goto out_up;
+ }
+
+ up_read(&mm->mmap_sem);
+
+ /* The shared case is hard. Let's do the conventional zeroing. */
+ do {
+ unsigned long unwritten = clear_user(buf, PAGE_SIZE);
+ if (unwritten)
+ return size + unwritten - PAGE_SIZE;
+ cond_resched();
+ buf += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ } while (size);
+
+ return size;
+out_up:
+ up_read(&mm->mmap_sem);
+ return size;
+}
+
+static ssize_t read_zero(struct file * file, char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long left, unwritten, written = 0;
+
+ if (!count)
+ return 0;
+
+ if (!access_ok(VERIFY_WRITE, buf, count))
+ return -EFAULT;
+
+ left = count;
+
+ /* do we want to be clever? Arbitrary cut-off */
+ if (count >= PAGE_SIZE*4) {
+ unsigned long partial;
+
+ /* How much left of the page? */
+ partial = (PAGE_SIZE-1) & -(unsigned long) buf;
+ unwritten = clear_user(buf, partial);
+ written = partial - unwritten;
+ if (unwritten)
+ goto out;
+ left -= partial;
+ buf += partial;
+ unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
+ written += (left & PAGE_MASK) - unwritten;
+ if (unwritten)
+ goto out;
+ buf += left & PAGE_MASK;
+ left &= ~PAGE_MASK;
+ }
+ unwritten = clear_user(buf, left);
+ written += left - unwritten;
+out:
+ return written ? written : -EFAULT;
+}
+
+static int mmap_zero(struct file * file, struct vm_area_struct * vma)
+{
+ if (vma->vm_flags & VM_SHARED)
+ return shmem_zero_setup(vma);
+ if (zeromap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
+ return -EAGAIN;
+ return 0;
+}
+#else /* CONFIG_MMU */
+static ssize_t read_zero(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ size_t todo = count;
+
+ while (todo) {
+ size_t chunk = todo;
+
+ if (chunk > 4096)
+ chunk = 4096; /* Just for latency reasons */
+ if (clear_user(buf, chunk))
+ return -EFAULT;
+ buf += chunk;
+ todo -= chunk;
+ cond_resched();
+ }
+ return count;
+}
+
+static int mmap_zero(struct file * file, struct vm_area_struct * vma)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_MMU */
+
+static ssize_t write_full(struct file * file, const char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ return -ENOSPC;
+}
+
+/*
+ * Special lseek() function for /dev/null and /dev/zero. Most notably, you
+ * can fopen() both devices with "a" now. This was previously impossible.
+ * -- SRB.
+ */
+
+static loff_t null_lseek(struct file * file, loff_t offset, int orig)
+{
+ return file->f_pos = 0;
+}
+
+/*
+ * The memory devices use the full 32/64 bits of the offset, and so we cannot
+ * check against negative addresses: they are ok. The return value is weird,
+ * though, in that case (0).
+ *
+ * also note that seeking relative to the "end of file" isn't supported:
+ * it has no meaning, so it returns -EINVAL.
+ */
+static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
+{
+ loff_t ret;
+
+ down(&file->f_dentry->d_inode->i_sem);
+ switch (orig) {
+ case 0:
+ file->f_pos = offset;
+ ret = file->f_pos;
+ force_successful_syscall_return();
+ break;
+ case 1:
+ file->f_pos += offset;
+ ret = file->f_pos;
+ force_successful_syscall_return();
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ up(&file->f_dentry->d_inode->i_sem);
+ return ret;
+}
+
+static int open_port(struct inode * inode, struct file * filp)
+{
+ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+}
+
++#define mmap_mem mmap_kmem
+#define zero_lseek null_lseek
+#define full_lseek null_lseek
+#define write_zero write_null
+#define read_full read_zero
+#define open_mem open_port
+#define open_kmem open_mem
+
++#ifndef ARCH_HAS_DEV_MEM
+static struct file_operations mem_fops = {
+ .llseek = memory_lseek,
+ .read = read_mem,
+ .write = write_mem,
+ .mmap = mmap_mem,
+ .open = open_mem,
+};
++#else
++extern struct file_operations mem_fops;
++#endif
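The #ifndef/#else split above lets an architecture supply its own /dev/mem
implementation while reusing the rest of this file; the Xen devmem.c added
by this changeset is exactly such a consumer. A minimal sketch of the
arch-side contract (stub body, hypothetical names):

    /* Defined non-static in the arch's devmem file when ARCH_HAS_DEV_MEM
     * is set; the generic driver links against it via the extern above. */
    static ssize_t arch_read_mem(struct file *file, char __user *buf,
                                 size_t count, loff_t *ppos)
    {
        return 0; /* a real version translates pseudo-physical addresses */
    }

    struct file_operations mem_fops = {
        .read = arch_read_mem,
    };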
+
+static struct file_operations kmem_fops = {
+ .llseek = memory_lseek,
+ .read = read_kmem,
+ .write = write_kmem,
+ .mmap = mmap_kmem,
+ .open = open_kmem,
+};
+
+static struct file_operations null_fops = {
+ .llseek = null_lseek,
+ .read = read_null,
+ .write = write_null,
+};
+
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
+static struct file_operations port_fops = {
+ .llseek = memory_lseek,
+ .read = read_port,
+ .write = write_port,
+ .open = open_port,
+};
+#endif
+
+static struct file_operations zero_fops = {
+ .llseek = zero_lseek,
+ .read = read_zero,
+ .write = write_zero,
+ .mmap = mmap_zero,
+};
+
+static struct file_operations full_fops = {
+ .llseek = full_lseek,
+ .read = read_full,
+ .write = write_full,
+};
+
+static ssize_t kmsg_write(struct file * file, const char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ char *tmp;
+ int ret;
+
+ tmp = kmalloc(count + 1, GFP_KERNEL);
+ if (tmp == NULL)
+ return -ENOMEM;
+ ret = -EFAULT;
+ if (!copy_from_user(tmp, buf, count)) {
+ tmp[count] = 0;
+ ret = printk("%s", tmp);
+ }
+ kfree(tmp);
+ return ret;
+}
+
+static struct file_operations kmsg_fops = {
+ .write = kmsg_write,
+};
+
+static int memory_open(struct inode * inode, struct file * filp)
+{
+ switch (iminor(inode)) {
+ case 1:
+ filp->f_op = &mem_fops;
+ break;
+ case 2:
+ filp->f_op = &kmem_fops;
+ break;
+ case 3:
+ filp->f_op = &null_fops;
+ break;
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
+ case 4:
+ filp->f_op = &port_fops;
+ break;
+#endif
+ case 5:
+ filp->f_op = &zero_fops;
+ break;
+ case 7:
+ filp->f_op = &full_fops;
+ break;
+ case 8:
+ filp->f_op = &random_fops;
+ break;
+ case 9:
+ filp->f_op = &urandom_fops;
+ break;
+ case 11:
+ filp->f_op = &kmsg_fops;
+ break;
+ default:
+ return -ENXIO;
+ }
+ if (filp->f_op && filp->f_op->open)
+ return filp->f_op->open(inode,filp);
+ return 0;
+}
+
+static struct file_operations memory_fops = {
+ .open = memory_open, /* just a selector for the real open */
+};
+
+static const struct {
+ unsigned int minor;
+ char *name;
+ umode_t mode;
+ struct file_operations *fops;
+} devlist[] = { /* list of minor devices */
+ {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
+ {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
+ {3, "null", S_IRUGO | S_IWUGO, &null_fops},
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
+ {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
+#endif
+ {5, "zero", S_IRUGO | S_IWUGO, &zero_fops},
+ {7, "full", S_IRUGO | S_IWUGO, &full_fops},
+ {8, "random", S_IRUGO | S_IWUSR, &random_fops},
+ {9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops},
+ {11,"kmsg", S_IRUGO | S_IWUSR, &kmsg_fops},
+};
+
+static struct class_simple *mem_class;
+
+static int __init chr_dev_init(void)
+{
+ int i;
+
+ if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
+ printk("unable to get major %d for memory devs\n", MEM_MAJOR);
+
+ mem_class = class_simple_create(THIS_MODULE, "mem");
+ for (i = 0; i < ARRAY_SIZE(devlist); i++) {
+ class_simple_device_add(mem_class,
+ MKDEV(MEM_MAJOR, devlist[i].minor),
+ NULL, devlist[i].name);
+ devfs_mk_cdev(MKDEV(MEM_MAJOR, devlist[i].minor),
+ S_IFCHR | devlist[i].mode, devlist[i].name);
+ }
+
+ return 0;
+}
+
+fs_initcall(chr_dev_init);
--- /dev/null
- obj-y += privcmd/
- obj-y += balloon/
+
+
+obj-y += console/
+obj-y += evtchn/
++obj-y += balloon/
+
++obj-$(CONFIG_XEN_PRIVILEGED_GUEST) += privcmd/
+obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/
+obj-$(CONFIG_XEN_NETDEV_BACKEND) += netback/
+obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += blkfront/
+obj-$(CONFIG_XEN_NETDEV_FRONTEND) += netfront/
+
--- /dev/null
- #include <linux/module.h>
+/******************************************************************************
+ * balloon.c
+ *
+ * Xen balloon driver - enables returning/claiming memory to/from Xen.
+ *
+ * Copyright (c) 2003, B Dragovic
++ * Copyright (c) 2003-2004, M Williamson, K Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/config.h>
- /* USER DEFINES -- THESE SHOULD BE COPIED TO USER-SPACE TOOLS */
- #define USER_INFLATE_BALLOON 1 /* return mem to hypervisor */
- #define USER_DEFLATE_BALLOON 2 /* claim mem from hypervisor */
- typedef struct user_balloon_op {
- unsigned int op;
- unsigned long size;
- } user_balloon_op_t;
- /* END OF USER DEFINE */
-
+#include <linux/kernel.h>
++#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/smp_lock.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/vmalloc.h>
+#include <asm-xen/xen_proc.h>
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/ctrl_if.h>
++#include <asm-xen/balloon.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/tlb.h>
+#include <linux/list.h>
+
- unsigned long credit;
- static unsigned long current_pages, most_seen_pages;
+static struct proc_dir_entry *balloon_pde;
+
- /* List of ballooned pages, threaded through the mem_map array. */
- LIST_HEAD(ballooned_pages);
++static DECLARE_MUTEX(balloon_mutex);
++spinlock_t balloon_lock = SPIN_LOCK_UNLOCKED;
++
++/* We aim for 'current allocation' == 'target allocation'. */
++static unsigned long current_pages;
++static unsigned long target_pages;
++
++/* We may hit the hard limit in Xen. If we do then we remember it. */
++static unsigned long hard_limit;
++
++/*
++ * Drivers may alter the memory reservation independently, but they must
++ * inform the balloon driver so that we can avoid hitting the hard limit.
++ */
++static unsigned long driver_pages;
++
++/* List of ballooned pages, threaded through the mem_map array. */
++static LIST_HEAD(ballooned_pages);
++static unsigned long balloon_low, balloon_high;
++
++/* Main work function, always executed in process context. */
++static void balloon_process(void *unused);
++static DECLARE_WORK(balloon_worker, balloon_process, NULL);
++static struct timer_list balloon_timer;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+/* Use the private and mapping fields of struct page as a list. */
+#define PAGE_TO_LIST(p) ( (struct list_head *)&p->private )
+#define LIST_TO_PAGE(l) ( list_entry( ((unsigned long *)l), \
+ struct page, private ) )
+#define UNLIST_PAGE(p) do { list_del(PAGE_TO_LIST(p)); \
+ p->mapping = NULL; \
+ p->private = 0; } while(0)
+#else
+/* There's a dedicated list field in struct page we can use. */
+#define PAGE_TO_LIST(p) ( &p->list )
+#define LIST_TO_PAGE(l) ( list_entry(l, struct page, list) )
+#define UNLIST_PAGE(p) ( list_del(&p->list) )
+#define pte_offset_kernel pte_offset
++#define subsys_initcall(_fn) __initcall(_fn)
+#endif
+
- /** add_ballooned_page - remember we've ballooned a pfn */
- void add_ballooned_page(unsigned long pfn)
++#define IPRINTK(fmt, args...) \
++ printk(KERN_INFO "xen_mem: " fmt, ##args)
++#define WPRINTK(fmt, args...) \
++ printk(KERN_WARNING "xen_mem: " fmt, ##args)
+
- struct page *p = mem_map + pfn;
-
- list_add(PAGE_TO_LIST(p), &ballooned_pages);
++/* balloon_append: add the given page to the balloon. */
++static void balloon_append(struct page *page)
+{
- /* rem_ballooned_page - recall a ballooned page and remove from list. */
- struct page *rem_ballooned_page(void)
++ /* Low memory is re-populated first, so highmem pages go at list tail. */
++ if ( PageHighMem(page) )
++ {
++ list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
++ balloon_high++;
++ }
++ else
++ {
++ list_add(PAGE_TO_LIST(page), &ballooned_pages);
++ balloon_low++;
++ }
+}
+
- if(!list_empty(&ballooned_pages))
- {
- struct page *ret;
++/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
++static struct page *balloon_retrieve(void)
+{
- ret = LIST_TO_PAGE(ballooned_pages.next);
- UNLIST_PAGE(ret);
++ struct page *page;
++
++ if ( list_empty(&ballooned_pages) )
++ return NULL;
+
- return ret;
- }
++ page = LIST_TO_PAGE(ballooned_pages.next);
++ UNLIST_PAGE(page);
+
- return NULL;
++ if ( PageHighMem(page) )
++ balloon_high--;
+ else
- pgd_t *pgd; pmd_t *pmd; pte_t *ptep;
- pgd = pgd_offset_k(addr);
++ balloon_low--;
++
++ return page;
+}
+
+static inline pte_t *get_ptep(unsigned long addr)
+{
- ptep = pte_offset_kernel(pmd, addr);
-
- return ptep;
++ pgd_t *pgd;
++ pmd_t *pmd;
+
++ pgd = pgd_offset_k(addr);
+ if ( pgd_none(*pgd) || pgd_bad(*pgd) ) BUG();
+
+ pmd = pmd_offset(pgd, addr);
+ if ( pmd_none(*pmd) || pmd_bad(*pmd) ) BUG();
+
- /* Main function for relinquishing memory. */
- static unsigned long inflate_balloon(unsigned long num_pages)
-
++ return pte_offset_kernel(pmd, addr);
+}
+
- unsigned long *parray;
- unsigned long *currp;
- unsigned long curraddr;
- unsigned long ret = 0;
- unsigned long i, j;
-
- parray = (unsigned long *)vmalloc(num_pages * sizeof(unsigned long));
- if ( parray == NULL )
- {
- printk(KERN_ERR "inflate_balloon: Unable to vmalloc parray\n");
- return -EFAULT;
- }
-
- currp = parray;
-
- for ( i = 0; i < num_pages; i++, currp++ )
- {
- struct page *page = alloc_page(GFP_HIGHUSER);
- unsigned long pfn = page - mem_map;
-
- /* If allocation fails then free all reserved pages. */
- if ( page == NULL )
- {
- printk(KERN_ERR "Unable to inflate balloon by %ld, only"
- " %ld pages free.", num_pages, i);
- currp = parray;
- for ( j = 0; j < i; j++, currp++ )
- __free_page((struct page *) (mem_map + *currp));
-
- ret = -EFAULT;
- goto cleanup;
- }
-
- *currp = pfn;
- }
-
-
- for ( i = 0, currp = parray; i < num_pages; i++, currp++ )
- {
- unsigned long mfn = phys_to_machine_mapping[*currp];
- curraddr = (unsigned long)page_address(mem_map + *currp);
- /* Blow away page contents for security, and also p.t. ref if any. */
- if ( curraddr != 0 )
- {
- scrub_pages(curraddr, 1);
- queue_l1_entry_update(get_ptep(curraddr), 0);
- }
- #ifdef CONFIG_XEN_SCRUB_PAGES
- else
- {
- void *p = kmap(&mem_map[*currp]);
- scrub_pages(p, 1);
- kunmap(&mem_map[*currp]);
- }
- #endif
-
- add_ballooned_page(*currp);
-
- phys_to_machine_mapping[*currp] = INVALID_P2M_ENTRY;
- *currp = mfn;
- }
-
- /* Flush updates through and flush the TLB. */
- xen_tlb_flush();
-
- ret = HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
- parray, num_pages, 0);
- if ( unlikely(ret != num_pages) )
- {
- printk(KERN_ERR "Unable to inflate balloon, error %lx\n", ret);
- goto cleanup;
- }
-
- credit += num_pages;
- ret = num_pages;
-
- cleanup:
- vfree(parray);
++static void balloon_alarm(unsigned long unused)
+{
- return ret;
++ schedule_work(&balloon_worker);
++}
+
- * Install new mem pages obtained by deflate_balloon. function walks
- * phys->machine mapping table looking for DEAD entries and populates
- * them.
++static unsigned long current_target(void)
++{
++ unsigned long target = min(target_pages, hard_limit);
++ if ( target > (current_pages + balloon_low + balloon_high) )
++ target = current_pages + balloon_low + balloon_high;
++ return target;
+}
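To make the clamping concrete, a hypothetical worked example: with
target_pages = 120000, hard_limit = ~0UL, current_pages = 100000 and
balloon_low + balloon_high = 15000, current_target() returns 115000; so
balloon_process() sees a credit of 15000 pages, since the domain can only
grow by reclaiming pages actually held in the balloon.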
+
+/*
- static unsigned long process_returned_pages(unsigned long * parray,
- unsigned long num)
++ * We avoid multiple worker processes conflicting via the balloon mutex.
++ * We may of course race updates of the target counts (which are protected
++ * by the balloon lock), or with changes to the Xen hard limit, but we will
++ * recover from these in time.
+ */
- /* currently, this function is rather simplistic as
- * it is assumed that domain reclaims only number of
- * pages previously released. this is to change soon
- * and the code to extend page tables etc. will be
- * incorporated here.
- */
-
- unsigned long * curr = parray;
- unsigned long num_installed;
-
- struct page *page;
++static void balloon_process(void *unused)
+{
- num_installed = 0;
- while ( (page = rem_ballooned_page()) != NULL )
- {
- unsigned long pfn;
-
- if ( num_installed == num )
- break;
++ unsigned long *mfn_list, pfn, i, flags;
++ struct page *page;
++ long credit, debt, rc;
++ void *v;
+
- pfn = page - mem_map;
++ down(&balloon_mutex);
+
- if(phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY)
++ retry:
++ mfn_list = NULL;
+
- printk("BUG: Tried to unballoon existing page!");
- BUG();
++ if ( (credit = current_target() - current_pages) > 0 )
++ {
++ mfn_list = (unsigned long *)vmalloc(credit * sizeof(*mfn_list));
++ if ( mfn_list == NULL )
++ goto out;
++
++ balloon_lock(flags);
++ rc = HYPERVISOR_dom_mem_op(
++ MEMOP_increase_reservation, mfn_list, credit, 0);
++ balloon_unlock(flags);
++ if ( rc < credit )
+ {
- phys_to_machine_mapping[pfn] = *curr;
- queue_machphys_update(*curr, pfn);
- if (pfn<max_low_pfn)
- queue_l1_entry_update(
- get_ptep((unsigned long)__va(pfn << PAGE_SHIFT)),
- ((*curr) << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
-
- __free_page(mem_map + pfn);
-
- curr++;
- num_installed++;
- }
-
- return num_installed;
- }
-
- unsigned long deflate_balloon(unsigned long num_pages)
- {
- unsigned long ret;
- unsigned long * parray;
-
- if ( num_pages > credit )
- {
- printk(KERN_ERR "deflate_balloon: %lu pages > %lu credit.\n",
- num_pages, credit);
- return -EAGAIN;
- }
-
- parray = (unsigned long *)vmalloc(num_pages * sizeof(unsigned long));
- if ( parray == NULL )
- {
- printk(KERN_ERR "deflate_balloon: Unable to vmalloc parray\n");
- return 0;
- }
++ /* We hit the Xen hard limit: reprobe. */
++ if ( HYPERVISOR_dom_mem_op(
++ MEMOP_decrease_reservation, mfn_list, rc, 0) != rc )
++ BUG();
++ hard_limit = current_pages + rc - driver_pages;
++ vfree(mfn_list);
++ goto retry;
+ }
+
- ret = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
- parray, num_pages, 0);
- if ( unlikely(ret != num_pages) )
- {
- printk(KERN_ERR "deflate_balloon: xen increase_reservation err %lx\n",
- ret);
- goto cleanup;
++ for ( i = 0; i < credit; i++ )
++ {
++ if ( (page = balloon_retrieve()) == NULL )
++ BUG();
++
++ pfn = page - mem_map;
++ if ( phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY )
++ BUG();
++
++ /* Update P->M and M->P tables. */
++ phys_to_machine_mapping[pfn] = mfn_list[i];
++ queue_machphys_update(mfn_list[i], pfn);
++
++ /* Link back into the page tables if it's not a highmem page. */
++ if ( pfn < max_low_pfn )
++ queue_l1_entry_update(
++ get_ptep((unsigned long)__va(pfn << PAGE_SHIFT)),
++ (mfn_list[i] << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
++
++ /* Finally, relinquish the memory back to the system allocator. */
++ ClearPageReserved(page);
++ set_page_count(page, 1);
++ __free_page(page);
++ }
+
-
- if ( (ret = process_returned_pages(parray, num_pages)) < num_pages )
++ current_pages += credit;
+ }
- printk(KERN_WARNING
- "deflate_balloon: restored only %lx of %lx pages.\n",
- ret, num_pages);
- goto cleanup;
- }
-
- ret = num_pages;
- credit -= num_pages;
++ else if ( credit < 0 )
+ {
- cleanup:
- vfree(parray);
++ debt = -credit;
+
- return ret;
- }
-
- #define PAGE_TO_MB_SHIFT 8
-
- /*
- * pagetable_extend() mimics pagetable_init() from arch/xen/mm/init.c
- * The loops do go through all of low memory (ZONE_NORMAL). The
- * old pages have _PAGE_PRESENT set and so get skipped.
- * If low memory is not full, the new pages are used to fill it, going
- * from cur_low_pfn to low_pfn. high memory is not direct mapped so
- * no extension is needed for new high memory.
- */
-
- static void pagetable_extend (int cur_low_pfn, int newpages)
- {
- unsigned long vaddr, end;
- pgd_t *kpgd, *pgd, *pgd_base;
- int i, j, k;
- pmd_t *kpmd, *pmd;
- pte_t *kpte, *pte, *pte_base;
- int low_pfn = min(cur_low_pfn+newpages,(int)max_low_pfn);
-
- /*
- * This can be zero as well - no problem, in that case we exit
- * the loops anyway due to the PTRS_PER_* conditions.
- */
- end = (unsigned long)__va(low_pfn*PAGE_SIZE);
-
- pgd_base = init_mm.pgd;
- #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- i = pgd_index(PAGE_OFFSET);
- #else
- i = __pgd_offset(PAGE_OFFSET);
- #endif
- pgd = pgd_base + i;
-
- for (; i < PTRS_PER_PGD; pgd++, i++) {
- vaddr = i*PGDIR_SIZE;
- if (end && (vaddr >= end))
- break;
- pmd = (pmd_t *)pgd;
- for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
- vaddr = i*PGDIR_SIZE + j*PMD_SIZE;
- if (end && (vaddr >= end))
++ mfn_list = (unsigned long *)vmalloc(debt * sizeof(*mfn_list));
++ if ( mfn_list == NULL )
++ goto out;
+
-
- /* Filled in for us already? */
- if ( pmd_val(*pmd) & _PAGE_PRESENT )
- continue;
-
- pte_base = pte = (pte_t *) __get_free_page(GFP_KERNEL);
-
- for (k = 0; k < PTRS_PER_PTE; pte++, k++) {
- vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
- if (end && (vaddr >= end))
- break;
- #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- *pte = mk_pte(virt_to_page(vaddr), PAGE_KERNEL);
- #else
- *pte = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
- #endif
++ for ( i = 0; i < debt; i++ )
++ {
++ if ( (page = alloc_page(GFP_HIGHUSER)) == NULL )
++ {
++ debt = i;
+ break;
- kpgd = pgd_offset_k((unsigned long)pte_base);
- kpmd = pmd_offset(kpgd, (unsigned long)pte_base);
- kpte = pte_offset_kernel(kpmd, (unsigned long)pte_base);
- queue_l1_entry_update(kpte,
- (*(unsigned long *)kpte)&~_PAGE_RW);
- set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));
- XEN_flush_page_update_queue();
- }
- }
- }
-
- /*
- * claim_new_pages() asks xen to increase this domain's memory reservation
- * and return a list of the new pages of memory. This new pages are
- * added to the free list of the memory manager.
- *
- * Available RAM does not normally change while Linux runs. To make this work,
- * the linux mem= boottime command line param must say how big memory could
- * possibly grow. Then setup_arch() in arch/xen/kernel/setup.c
- * sets max_pfn, max_low_pfn and the zones according to
- * this max memory size. The page tables themselves can only be
- * extended after xen has assigned new pages to this domain.
- */
-
- static unsigned long
- claim_new_pages(unsigned long num_pages)
- {
- unsigned long new_page_cnt, pfn;
- unsigned long * parray, *curr;
+ }
- if (most_seen_pages+num_pages> max_pfn)
- num_pages = max_pfn-most_seen_pages;
- if (num_pages==0) return -EINVAL;
+
- parray = (unsigned long *)vmalloc(num_pages * sizeof(unsigned long));
- if ( parray == NULL )
- {
- printk(KERN_ERR "claim_new_pages: Unable to vmalloc parray\n");
- return 0;
- }
++ pfn = page - mem_map;
++ mfn_list[i] = phys_to_machine_mapping[pfn];
++ phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
+
- new_page_cnt = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
- parray, num_pages, 0);
- if ( new_page_cnt != num_pages )
- {
- printk(KERN_WARNING
- "claim_new_pages: xen granted only %lu of %lu requested pages\n",
- new_page_cnt, num_pages);
-
- /*
- * Avoid xen lockup when user forgot to setdomainmaxmem. Xen
- * usually can dribble out a few pages and then hangs.
- */
- if ( new_page_cnt < 1000 )
- {
- printk(KERN_WARNING "Remember to use setdomainmaxmem\n");
- HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
- parray, new_page_cnt, 0);
- return -EFAULT;
++ if ( !PageHighMem(page) )
++ {
++ v = phys_to_virt((page - mem_map) << PAGE_SHIFT);
++ scrub_pages(v, 1);
++ queue_l1_entry_update(get_ptep((unsigned long)v), 0);
++ }
++#ifdef CONFIG_XEN_SCRUB_PAGES
++ else
++ {
++ v = kmap(page);
++ scrub_pages(v, 1);
++ kunmap(page);
++ }
++#endif
+
- }
- memcpy(phys_to_machine_mapping+most_seen_pages, parray,
- new_page_cnt * sizeof(unsigned long));
++ balloon_append(page);
+ }
- pagetable_extend(most_seen_pages,new_page_cnt);
+
- for ( pfn = most_seen_pages, curr = parray;
- pfn < most_seen_pages+new_page_cnt;
- pfn++, curr++ )
- {
- struct page *page = mem_map + pfn;
++ /* Flush updates through and flush the TLB. */
++ xen_tlb_flush();
+
- #ifndef CONFIG_HIGHMEM
- if ( pfn>=max_low_pfn )
- {
- printk(KERN_WARNING "Warning only %ldMB will be used.\n",
- pfn>>PAGE_TO_MB_SHIFT);
- printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
- break;
- }
- #endif
- queue_machphys_update(*curr, pfn);
- if ( pfn < max_low_pfn )
- queue_l1_entry_update(
- get_ptep((unsigned long)__va(pfn << PAGE_SHIFT)),
- ((*curr) << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
-
- XEN_flush_page_update_queue();
-
- /* this next bit mimics arch/xen/mm/init.c:one_highpage_init() */
- ClearPageReserved(page);
- if ( pfn >= max_low_pfn )
- set_bit(PG_highmem, &page->flags);
- set_page_count(page, 1);
- __free_page(page);
++ if ( HYPERVISOR_dom_mem_op(
++ MEMOP_decrease_reservation, mfn_list, debt, 0) != debt )
++ BUG();
+
- vfree(parray);
++ current_pages -= debt;
+ }
+
- return new_page_cnt;
- }
++ out:
++ if ( mfn_list != NULL )
++ vfree(mfn_list);
+
- static int balloon_try_target(int target)
++ /* Schedule more work if there is some still to be done. */
++ if ( current_target() != current_pages )
++ mod_timer(&balloon_timer, jiffies + HZ);
+
++ up(&balloon_mutex);
++}
+
- int change, reclaim;
-
- if ( target < current_pages )
- {
- int change = inflate_balloon(current_pages-target);
- if ( change <= 0 )
- return change;
-
- current_pages -= change;
- printk(KERN_INFO "Relinquish %dMB to xen. Domain now has %luMB\n",
- change>>PAGE_TO_MB_SHIFT, current_pages>>PAGE_TO_MB_SHIFT);
- }
- else if ( target > current_pages )
- {
- reclaim = min((unsigned long)target,most_seen_pages) - current_pages;
-
- if ( reclaim )
- {
- change = deflate_balloon( reclaim );
- if ( change <= 0 )
- return change;
- current_pages += change;
- printk(KERN_INFO "Reclaim %dMB from xen. Domain now has %luMB\n",
- change>>PAGE_TO_MB_SHIFT, current_pages>>PAGE_TO_MB_SHIFT);
- }
-
- if ( most_seen_pages < target )
- {
- int growth = claim_new_pages(target-most_seen_pages);
- if ( growth <= 0 )
- return growth;
- most_seen_pages += growth;
- current_pages += growth;
- printk(KERN_INFO "Granted %dMB new mem. Dom now has %luMB\n",
- growth>>PAGE_TO_MB_SHIFT, current_pages>>PAGE_TO_MB_SHIFT);
- }
- }
-
- return 1;
++/* Resets the Xen limit, sets new target, and kicks off processing. */
++static void set_new_target(unsigned long target)
+{
-
++ /* No need for a lock: these are not read-modify-write updates. */
++ hard_limit = ~0UL;
++ target_pages = target;
++ schedule_work(&balloon_worker);
+}
+
- {
- mem_request_t *req = (mem_request_t *)&msg->msg[0];
- req->status = balloon_try_target(req->target);
- }
- break;
+static void balloon_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+{
+ switch ( msg->subtype )
+ {
+ case CMSG_MEM_REQUEST_SET:
++ {
++ mem_request_t *req = (mem_request_t *)&msg->msg[0];
+ if ( msg->length != sizeof(mem_request_t) )
+ goto parse_error;
- #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- typedef size_t count_t;
- #else
- typedef u_long count_t;
- #endif
-
- static int do_balloon_write(const char *buffer, count_t count)
++ set_new_target(req->target);
++ req->status = 0;
++ }
++ break;
+ default:
+ goto parse_error;
+ }
+
+ ctrl_if_send_response(msg);
+ return;
+
+ parse_error:
+ msg->length = 0;
+ ctrl_if_send_response(msg);
+}
+
- int len, i;
- unsigned long target;
- unsigned long long targetbytes;
++static int balloon_write(struct file *file, const char __user *buffer,
++ unsigned long count, void *data)
+{
+ char memstring[64], *endchar;
- /* Only admin can play with the balloon :) */
++ unsigned long long target_bytes;
+
- return -EFBIG;
+ if ( !capable(CAP_SYS_ADMIN) )
+ return -EPERM;
+
++ if ( count <= 1 )
++ return -EBADMSG; /* runt */
+ if ( count > sizeof(memstring) )
- len = strnlen_user(buffer, count);
- if ( len == 0 ) return -EBADMSG;
- if ( len == 1 ) return 1; /* input starts with a NUL char */
- if ( strncpy_from_user(memstring, buffer, len) < 0 )
++ return -EFBIG; /* too long */
+
- endchar = memstring;
- for ( i = 0; i < len; ++i, ++endchar )
- if ( (memstring[i] < '0') || (memstring[i] > '9') )
- break;
- if ( i == 0 )
- return -EBADMSG;
-
- targetbytes = memparse(memstring,&endchar);
- target = targetbytes >> PAGE_SHIFT;
-
- i = balloon_try_target(target);
-
- if ( i <= 0 ) return i;
-
- return len;
- }
-
- #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- static int balloon_write(struct file *file, const char *buffer,
- size_t count, loff_t *offp)
- {
- int len = do_balloon_write(buffer, count);
-
- if ( len <= 0 ) return len;
++ if ( copy_from_user(memstring, buffer, count) )
+ return -EFAULT;
++ memstring[sizeof(memstring)-1] = '\0';
+
- *offp += len;
- return len;
++ target_bytes = memparse(memstring, &endchar);
++ set_new_target(target_bytes >> PAGE_SHIFT);
+
- static int balloon_read(struct file *filp, char *buffer,
- size_t count, loff_t *offp)
++ return count;
+}
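The write handler feeds the buffer to memparse(), so suffixed sizes work.
A usage sketch from user space (hypothetical standalone program, not part
of this changeset):

    #include <stdio.h>

    /* Ask the balloon driver to move the domain to a 128MB target. */
    int main(void)
    {
        FILE *f = fopen("/proc/xen/balloon", "w");
        if (f == NULL)
            return 1;
        fputs("128M", f);
        return fclose(f) ? 1 : 0;
    }

The equivalent shell one-liner is simply: echo 128M > /proc/xen/balloon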
+
- static char priv_buf[32];
- char *priv_bufp = priv_buf;
++static int balloon_read(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
+{
- len = sprintf(priv_buf,"%lu\n",current_pages<<PAGE_SHIFT);
-
- len -= *offp;
- priv_bufp += *offp;
- if (len>count) len = count;
- if (len<0) len = 0;
+ int len;
- if ( copy_to_user(buffer, priv_bufp, len) != 0 )
- return -EFAULT;
+
- *offp += len;
++#define K(_p) ((_p)<<(PAGE_SHIFT-10))
++ len = sprintf(
++ page,
++ "Current allocation: %8lu kB\n"
++ "Requested target: %8lu kB\n"
++ "Low-mem balloon: %8lu kB\n"
++ "High-mem balloon: %8lu kB\n"
++ "Xen hard limit: ",
++ K(current_pages), K(target_pages), K(balloon_low), K(balloon_high));
++
++ if ( hard_limit != ~0UL )
++ len += sprintf(
++ page + len,
++ "%8lu kB (inc. %8lu kB driver headroom)\n",
++ K(hard_limit), K(driver_pages));
++ else
++ len += sprintf(
++ page + len,
++ " ??? kB\n");
+
- static struct file_operations balloon_fops = {
- .read = balloon_read,
- .write = balloon_write
- };
-
- #else
-
- static int balloon_write(struct file *file, const char *buffer,
- u_long count, void *data)
- {
- return do_balloon_write(buffer, count);
- }
-
- static int balloon_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
++ *eof = 1;
+ return len;
+}
+
- int len;
- len = sprintf(page,"%lu\n",current_pages<<PAGE_SHIFT);
-
- if (len <= off+count) *eof = 1;
- *start = page + off;
- len -= off;
- if (len>count) len = count;
- if (len<0) len = 0;
- return len;
- }
++static int __init balloon_init(void)
+{
- #endif
++ unsigned long pfn;
++ struct page *page;
+
- static int __init balloon_init(void)
- {
- printk(KERN_ALERT "Starting Xen Balloon driver\n");
++ IPRINTK("Initialising balloon driver.\n");
+
- most_seen_pages = current_pages = min(xen_start_info.nr_pages,max_pfn);
- if ( (balloon_pde = create_xen_proc_entry("memory_target", 0644)) == NULL )
++ current_pages = min(xen_start_info.nr_pages, max_pfn);
++ target_pages = current_pages;
++ balloon_low = 0;
++ balloon_high = 0;
++ driver_pages = 0UL;
++ hard_limit = ~0UL;
+
- printk(KERN_ALERT "Unable to create balloon driver proc entry!");
++ init_timer(&balloon_timer);
++ balloon_timer.data = 0;
++ balloon_timer.function = balloon_alarm;
++
++ if ( (balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL )
+ {
- #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- balloon_pde->owner = THIS_MODULE;
- balloon_pde->nlink = 1;
- balloon_pde->proc_fops = &balloon_fops;
- #else
- balloon_pde->write_proc = balloon_write;
++ WPRINTK("Unable to create /proc/xen/balloon.\n");
+ return -1;
+ }
+
- #endif
+ balloon_pde->read_proc = balloon_read;
- (void)ctrl_if_register_receiver(CMSG_MEM_REQUEST, balloon_ctrlif_rx,
- CALLBACK_IN_BLOCKING_CONTEXT);
++ balloon_pde->write_proc = balloon_write;
+
- /*
- * make_module a new phys map if mem= says xen can give us memory to grow
- */
- if ( max_pfn > xen_start_info.nr_pages )
++ (void)ctrl_if_register_receiver(CMSG_MEM_REQUEST, balloon_ctrlif_rx, 0);
+
- extern unsigned long *phys_to_machine_mapping;
- unsigned long *newmap;
- newmap = (unsigned long *)vmalloc(max_pfn * sizeof(unsigned long));
- memset(newmap, ~0, max_pfn * sizeof(unsigned long));
- memcpy(newmap, phys_to_machine_mapping,
- xen_start_info.nr_pages * sizeof(unsigned long));
- phys_to_machine_mapping = newmap;
++ /* Initialise the balloon with excess memory space. */
++ for ( pfn = xen_start_info.nr_pages; pfn < max_pfn; pfn++ )
+ {
- static void __exit balloon_cleanup(void)
++ page = &mem_map[pfn];
++ if ( !PageReserved(page) )
++ balloon_append(page);
+ }
+
+ return 0;
+}
+
- if ( balloon_pde != NULL )
- {
- remove_xen_proc_entry("memory_target");
- balloon_pde = NULL;
- }
++subsys_initcall(balloon_init);
++
++void balloon_update_driver_allowance(long delta)
+{
- module_init(balloon_init);
- module_exit(balloon_cleanup);
++ unsigned long flags;
++ balloon_lock(flags);
++ driver_pages += delta; /* non-atomic update */
++ balloon_unlock(flags);
++}
++
++void balloon_put_pages(unsigned long *mfn_list, unsigned long nr_mfns)
++{
++ unsigned long flags;
++
++ balloon_lock(flags);
++ if ( HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
++ mfn_list, nr_mfns, 0) != nr_mfns )
++ BUG();
++ current_pages -= nr_mfns; /* non-atomic update */
++ balloon_unlock(flags);
++
++ schedule_work(&balloon_worker);
+}
+
++EXPORT_SYMBOL(balloon_update_driver_allowance);
++EXPORT_SYMBOL(balloon_put_pages);
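These two exports let other split drivers cooperate with the balloon: balloon_update_driver_allowance() accounts driver headroom against the Xen hard limit, while balloon_put_pages() returns a batch of machine frames to the hypervisor outright. A hedged sketch of a client driver; apart from the two exported calls, every name below is illustrative:

    #include <asm-xen/balloon.h>

    #define MY_RING_SIZE 256                /* hypothetical ring size  */

    static unsigned long my_mfns[16];       /* frames this driver owns */

    static void my_driver_init(void)
    {
        /* We may grow the domain's reservation by this much later. */
        balloon_update_driver_allowance(MY_RING_SIZE);
    }

    static void my_driver_release_frames(unsigned long nr)
    {
        /* Hand 'nr' machine frames straight back to Xen. */
        balloon_put_pages(my_mfns, nr);
    }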
--- /dev/null
- * may be outstanding requests at the disc whose asynchronous responses
- * must still be notified to the remote driver.
+/******************************************************************************
+ * arch/xen/drivers/netif/backend/interface.c
+ *
+ * Network-device interface management.
+ *
+ * Copyright (c) 2004, Keir Fraser
+ */
+
+#include "common.h"
+#include <linux/rtnetlink.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+#endif
+
+#define NETIF_HASHSZ 1024
+#define NETIF_HASH(_d,_h) (((int)(_d)^(int)(_h))&(NETIF_HASHSZ-1))
+
+static netif_t *netif_hash[NETIF_HASHSZ];
+
+netif_t *netif_find_by_handle(domid_t domid, unsigned int handle)
+{
+ netif_t *netif = netif_hash[NETIF_HASH(domid, handle)];
+ while ( (netif != NULL) &&
+ ((netif->domid != domid) || (netif->handle != handle)) )
+ netif = netif->hash_next;
+ return netif;
+}
+
+static void __netif_disconnect_complete(void *arg)
+{
+ netif_t *netif = (netif_t *)arg;
+ ctrl_msg_t cmsg;
+ netif_be_disconnect_t disc;
+
+ /*
+ * These can't be done in netif_disconnect() because at that point there
- char name[IFNAMSIZ] = {};
++ * may be outstanding requests in the network stack whose asynchronous
++ * responses must still be notified to the remote driver.
+ */
+ unbind_evtchn_from_irq(netif->evtchn);
+ vfree(netif->tx); /* Frees netif->rx as well. */
+ rtnl_lock();
+ (void)dev_close(netif->dev);
+ rtnl_unlock();
+
+ /* Construct the deferred response message. */
+ cmsg.type = CMSG_NETIF_BE;
+ cmsg.subtype = CMSG_NETIF_BE_DISCONNECT;
+ cmsg.id = netif->disconnect_rspid;
+ cmsg.length = sizeof(netif_be_disconnect_t);
+ disc.domid = netif->domid;
+ disc.netif_handle = netif->handle;
+ disc.status = NETIF_BE_STATUS_OKAY;
+ memcpy(cmsg.msg, &disc, sizeof(disc));
+
+ /*
+ * Make sure message is constructed /before/ status change, because
+ * after the status change the 'netif' structure could be deallocated at
+ * any time. Also make sure we send the response /after/ status change,
+ * as otherwise a subsequent CONNECT request could spuriously fail if
+ * another CPU doesn't see the status change yet.
+ */
+ mb();
+ if ( netif->status != DISCONNECTING )
+ BUG();
+ netif->status = DISCONNECTED;
+ mb();
+
+ /* Send the successful response. */
+ ctrl_if_send_response(&cmsg);
+}
+
+void netif_disconnect_complete(netif_t *netif)
+{
+ INIT_WORK(&netif->work, __netif_disconnect_complete, (void *)netif);
+ schedule_work(&netif->work);
+}
+
+void netif_create(netif_be_create_t *create)
+{
+ int err = 0;
+ domid_t domid = create->domid;
+ unsigned int handle = create->netif_handle;
+ struct net_device *dev;
+ netif_t **pnetif, *netif;
- kfree(dev);
++ char name[IFNAMSIZ];
+
+ snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
+ dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
+ if ( dev == NULL )
+ {
+ DPRINTK("Could not create netif: out of memory\n");
+ create->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
+ return;
+ }
+
+ netif = dev->priv;
+ memset(netif, 0, sizeof(*netif));
+ netif->domid = domid;
+ netif->handle = handle;
+ netif->status = DISCONNECTED;
+ spin_lock_init(&netif->rx_lock);
+ spin_lock_init(&netif->tx_lock);
+ atomic_set(&netif->refcnt, 0);
+ netif->dev = dev;
+
+ netif->credit_bytes = netif->remaining_credit = ~0UL;
+ netif->credit_usec = 0UL;
+ /*init_ac_timer(&new_vif->credit_timeout);*/
+
+ pnetif = &netif_hash[NETIF_HASH(domid, handle)];
+ while ( *pnetif != NULL )
+ {
+ if ( ((*pnetif)->domid == domid) && ((*pnetif)->handle == handle) )
+ {
+ DPRINTK("Could not create netif: already exists\n");
+ create->status = NETIF_BE_STATUS_INTERFACE_EXISTS;
- kfree(dev);
++ free_netdev(dev);
+ return;
+ }
+ pnetif = &(*pnetif)->hash_next;
+ }
+
+ dev->hard_start_xmit = netif_be_start_xmit;
+ dev->get_stats = netif_be_get_stats;
+ memcpy(dev->dev_addr, create->mac, ETH_ALEN);
+
+ /* Disable queuing. */
+ dev->tx_queue_len = 0;
+
+ /* Force a different MAC from remote end. */
+ dev->dev_addr[2] ^= 1;
+
+ if ( (err = register_netdev(dev)) != 0 )
+ {
+ DPRINTK("Could not register new net device %s: err=%d\n",
+ dev->name, err);
+ create->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
- kfree(netif->dev);
++ free_netdev(dev);
+ return;
+ }
+
+ netif->hash_next = *pnetif;
+ *pnetif = netif;
+
+ DPRINTK("Successfully created netif\n");
+ create->status = NETIF_BE_STATUS_OKAY;
+}
+
+void netif_destroy(netif_be_destroy_t *destroy)
+{
+ domid_t domid = destroy->domid;
+ unsigned int handle = destroy->netif_handle;
+ netif_t **pnetif, *netif;
+
+ pnetif = &netif_hash[NETIF_HASH(domid, handle)];
+ while ( (netif = *pnetif) != NULL )
+ {
+ if ( (netif->domid == domid) && (netif->handle == handle) )
+ {
+ if ( netif->status != DISCONNECTED )
+ goto still_connected;
+ goto destroy;
+ }
+ pnetif = &netif->hash_next;
+ }
+
+ destroy->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return;
+
+ still_connected:
+ destroy->status = NETIF_BE_STATUS_INTERFACE_CONNECTED;
+ return;
+
+ destroy:
+ *pnetif = netif->hash_next;
+ unregister_netdev(netif->dev);
++ free_netdev(netif->dev);
+ destroy->status = NETIF_BE_STATUS_OKAY;
+}
+
+void netif_connect(netif_be_connect_t *connect)
+{
+ domid_t domid = connect->domid;
+ unsigned int handle = connect->netif_handle;
+ unsigned int evtchn = connect->evtchn;
+ unsigned long tx_shmem_frame = connect->tx_shmem_frame;
+ unsigned long rx_shmem_frame = connect->rx_shmem_frame;
+ struct vm_struct *vma;
+ pgprot_t prot;
+ int error;
+ netif_t *netif;
+#if 0
+ struct net_device *eth0_dev;
+#endif
+
+ netif = netif_find_by_handle(domid, handle);
+ if ( unlikely(netif == NULL) )
+ {
+ DPRINTK("netif_connect attempted for non-existent netif (%u,%u)\n",
+ connect->domid, connect->netif_handle);
+ connect->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return;
+ }
+
+ if ( netif->status != DISCONNECTED )
+ {
+ connect->status = NETIF_BE_STATUS_INTERFACE_CONNECTED;
+ return;
+ }
+
+ if ( (vma = get_vm_area(2*PAGE_SIZE, VM_IOREMAP)) == NULL )
+ {
+ connect->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
+ return;
+ }
+
+ prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED);
+ error = direct_remap_area_pages(&init_mm,
+ VMALLOC_VMADDR(vma->addr),
+ tx_shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
+ prot, domid);
+ error |= direct_remap_area_pages(&init_mm,
+ VMALLOC_VMADDR(vma->addr) + PAGE_SIZE,
+ rx_shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
+ prot, domid);
+ if ( error != 0 )
+ {
+ if ( error == -ENOMEM )
+ connect->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
+ else if ( error == -EFAULT )
+ connect->status = NETIF_BE_STATUS_MAPPING_ERROR;
+ else
+ connect->status = NETIF_BE_STATUS_ERROR;
+ vfree(vma->addr);
+ return;
+ }
+
+ netif->evtchn = evtchn;
+ netif->irq = bind_evtchn_to_irq(evtchn);
+ netif->tx_shmem_frame = tx_shmem_frame;
+ netif->rx_shmem_frame = rx_shmem_frame;
+ netif->tx =
+ (netif_tx_interface_t *)vma->addr;
+ netif->rx =
+ (netif_rx_interface_t *)((char *)vma->addr + PAGE_SIZE);
+ netif->status = CONNECTED;
+ netif_get(netif);
+
+ netif->tx->resp_prod = netif->rx->resp_prod = 0;
+
+ rtnl_lock();
+ (void)dev_open(netif->dev);
+ rtnl_unlock();
+
+ (void)request_irq(netif->irq, netif_be_int, 0, netif->dev->name, netif);
+ netif_start_queue(netif->dev);
+
+ connect->status = NETIF_BE_STATUS_OKAY;
+}
+
+int netif_disconnect(netif_be_disconnect_t *disconnect, u8 rsp_id)
+{
+ domid_t domid = disconnect->domid;
+ unsigned int handle = disconnect->netif_handle;
+ netif_t *netif;
+
+ netif = netif_find_by_handle(domid, handle);
+ if ( unlikely(netif == NULL) )
+ {
+ DPRINTK("netif_disconnect attempted for non-existent netif"
+ " (%u,%u)\n", disconnect->domid, disconnect->netif_handle);
+ disconnect->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return 1; /* Caller will send response error message. */
+ }
+
+ if ( netif->status == CONNECTED )
+ {
+ netif->status = DISCONNECTING;
+ netif->disconnect_rspid = rsp_id;
+ wmb(); /* Let other CPUs see the status change. */
+ netif_stop_queue(netif->dev);
+ free_irq(netif->irq, netif);
+ netif_deschedule(netif);
+ netif_put(netif);
+ return 0; /* Caller should not send response message. */
+ }
+
+ disconnect->status = NETIF_BE_STATUS_OKAY;
+ return 1;
+}
+
+void netif_interface_init(void)
+{
+ memset(netif_hash, 0, sizeof(netif_hash));
+}
--- /dev/null
- static void __refresh_mfn_list(void)
- {
- int ret = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
- mfn_list, MAX_MFN_ALLOC, 0);
- if ( unlikely(ret != MAX_MFN_ALLOC) )
- BUG();
- alloc_index = MAX_MFN_ALLOC;
- }
-
- static unsigned long get_new_mfn(void)
+/******************************************************************************
+ * arch/xen/drivers/netif/backend/main.c
+ *
+ * Back-end of the driver for virtual network devices. This portion of the
+ * driver exports a 'unified' network-device interface that can be accessed
+ * by any operating system that implements a compatible front end. A
+ * reference front-end implementation can be found in:
+ * arch/xen/drivers/netif/frontend
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ */
+
+#include "common.h"
++#include <asm-xen/balloon.h>
+
+static void netif_page_release(struct page *page);
+static void netif_skb_release(struct sk_buff *skb);
+static void make_tx_response(netif_t *netif,
+ u16 id,
+ s8 st);
+static int make_rx_response(netif_t *netif,
+ u16 id,
+ s8 st,
+ memory_t addr,
+ u16 size);
+
+static void net_tx_action(unsigned long unused);
+static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
+
+static void net_rx_action(unsigned long unused);
+static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
+
++static struct timer_list net_timer;
++
+static struct sk_buff_head rx_queue;
+static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE*2];
+static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE*3];
+static unsigned char rx_notify[NR_EVENT_CHANNELS];
+
+/* Don't currently gate addition of an interface to the tx scheduling list. */
+#define tx_work_exists(_if) (1)
+
+#define MAX_PENDING_REQS 256
+static unsigned long mmap_vstart;
+#define MMAP_VADDR(_req) (mmap_vstart + ((_req) * PAGE_SIZE))
+
+#define PKT_PROT_LEN 64
+
+static struct {
+ netif_tx_request_t req;
+ netif_t *netif;
+} pending_tx_info[MAX_PENDING_REQS];
+static u16 pending_ring[MAX_PENDING_REQS];
+typedef unsigned int PEND_RING_IDX;
+#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
+static PEND_RING_IDX pending_prod, pending_cons;
+#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
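The pending indices are free-running: pending_prod and pending_cons only ever increase, MASK_PEND_IDX() folds them into the power-of-two array, and unsigned subtraction keeps the counts exact even across 2^32 wraparound. A worked example with illustrative values:

    PEND_RING_IDX cons = 0xFFFFFFFEU;       /* consumer, about to wrap   */
    PEND_RING_IDX prod = 0x00000002U;       /* producer, already wrapped */
    unsigned int free_ids = prod - cons;    /* == 4, despite the wrap    */
    unsigned int in_flight = MAX_PENDING_REQS - free_ids; /* NR_PENDING_REQS */
    unsigned int slot = MASK_PEND_IDX(cons);   /* == 254, the array slot */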
+
+/* Freed TX SKBs get batched on this ring before return to pending_ring. */
+static u16 dealloc_ring[MAX_PENDING_REQS];
+static spinlock_t dealloc_lock = SPIN_LOCK_UNLOCKED;
+static PEND_RING_IDX dealloc_prod, dealloc_cons;
+
+static struct sk_buff_head tx_queue;
+static multicall_entry_t tx_mcl[MAX_PENDING_REQS];
+
+static struct list_head net_schedule_list;
+static spinlock_t net_schedule_list_lock;
+
+#define MAX_MFN_ALLOC 64
+static unsigned long mfn_list[MAX_MFN_ALLOC];
+static unsigned int alloc_index = 0;
+static spinlock_t mfn_lock = SPIN_LOCK_UNLOCKED;
+
- unsigned long mfn, flags;
++static unsigned long alloc_mfn(void)
+{
- if ( alloc_index == 0 )
- __refresh_mfn_list();
- mfn = mfn_list[--alloc_index];
++ unsigned long mfn = 0, flags;
+ spin_lock_irqsave(&mfn_lock, flags);
- static void dealloc_mfn(unsigned long mfn)
++ if ( unlikely(alloc_index == 0) )
++ alloc_index = HYPERVISOR_dom_mem_op(
++ MEMOP_increase_reservation, mfn_list, MAX_MFN_ALLOC, 0);
++ if ( alloc_index != 0 )
++ mfn = mfn_list[--alloc_index];
+ spin_unlock_irqrestore(&mfn_lock, flags);
+ return mfn;
+}
+
- new_mfn = get_new_mfn();
-
++static void free_mfn(unsigned long mfn)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&mfn_lock, flags);
+ if ( alloc_index != MAX_MFN_ALLOC )
+ mfn_list[alloc_index++] = mfn;
+ else if ( HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
+ &mfn, 1, 0) != 1 )
+ BUG();
+ spin_unlock_irqrestore(&mfn_lock, flags);
+}
+
+static inline void maybe_schedule_tx_action(void)
+{
+ smp_mb();
+ if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
+ !list_empty(&net_schedule_list) )
+ tasklet_schedule(&net_tx_tasklet);
+}
+
+/*
+ * A gross way of confirming the origin of an skb data page. The slab
+ * allocator abuses a field in the page struct to cache the kmem_cache_t ptr.
+ */
+static inline int is_xen_skb(struct sk_buff *skb)
+{
+ extern kmem_cache_t *skbuff_cachep;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
+#else
+ kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->list.next;
+#endif
+ return (cp == skbuff_cachep);
+}
+
+int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ netif_t *netif = (netif_t *)dev->priv;
+
+ ASSERT(skb->dev == dev);
+
+ /* Drop the packet if the target domain has no receive buffers. */
+ if ( (netif->rx_req_cons == netif->rx->req_prod) ||
+ ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE) )
+ goto drop;
+
+ /*
+ * We do not copy the packet unless:
+ * 1. The data is shared or cloned; or
+ * 2. The data is not allocated from our special cache.
+ * NB. We also could not cope with fragmented packets, but we won't get
+ * any because we do not advertise the NETIF_F_SG feature.
+ */
+ if ( skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb) )
+ {
+ int hlen = skb->data - skb->head;
+ struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
+ if ( unlikely(nskb == NULL) )
+ goto drop;
+ skb_reserve(nskb, hlen);
+ __skb_put(nskb, skb->len);
+ (void)skb_copy_bits(skb, -hlen, nskb->data - hlen, skb->len + hlen);
+ nskb->dev = skb->dev;
+ dev_kfree_skb(skb);
+ skb = nskb;
+ }
+
+ netif->rx_req_cons++;
+
+ skb_queue_tail(&rx_queue, skb);
+ tasklet_schedule(&net_rx_tasklet);
+
+ return 0;
+
+ drop:
+ netif->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+#if 0
+static void xen_network_done_notify(void)
+{
+ static struct net_device *eth0_dev = NULL;
+ if ( unlikely(eth0_dev == NULL) )
+ eth0_dev = __dev_get_by_name("eth0");
+ netif_rx_schedule(eth0_dev);
+}
+/*
+ * Add following to poll() function in NAPI driver (Tigon3 is example):
+ * if ( xen_network_done() )
+ * tg3_enable_ints(tp);
+ */
+int xen_network_done(void)
+{
+ return skb_queue_empty(&rx_queue);
+}
+#endif
+
+static void net_rx_action(unsigned long unused)
+{
+ netif_t *netif;
+ s8 status;
+ u16 size, id, evtchn;
+ mmu_update_t *mmu;
+ multicall_entry_t *mcl;
+ unsigned long vdata, mdata, new_mfn;
+ struct sk_buff_head rxq;
+ struct sk_buff *skb;
+ u16 notify_list[NETIF_RX_RING_SIZE];
+ int notify_nr = 0;
+
+ skb_queue_head_init(&rxq);
+
+ mcl = rx_mcl;
+ mmu = rx_mmu;
+ while ( (skb = skb_dequeue(&rx_queue)) != NULL )
+ {
+ netif = (netif_t *)skb->dev->priv;
+ vdata = (unsigned long)skb->data;
+ mdata = virt_to_machine(vdata);
- dealloc_mfn(mdata >> PAGE_SHIFT);
++
++ /* Memory squeeze? Back off for a while, then retry. */
++ if ( (new_mfn = alloc_mfn()) == 0 )
++ {
++ if ( net_ratelimit() )
++ printk(KERN_WARNING "Memory squeeze in netback driver.\n");
++ mod_timer(&net_timer, jiffies + HZ);
++ break;
++ }
++
+ /*
+ * Set the new P2M table entry before reassigning the old data page.
+ * Heed the comment in pgtable-2level.h:pte_page(). :-)
+ */
+ phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
+
+ mmu[0].ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+ mmu[0].val = __pa(vdata) >> PAGE_SHIFT;
+ mmu[1].ptr = MMU_EXTENDED_COMMAND;
+ mmu[1].val = MMUEXT_SET_FOREIGNDOM;
+ mmu[1].val |= (unsigned long)netif->domid << 16;
+ mmu[2].ptr = (mdata & PAGE_MASK) | MMU_EXTENDED_COMMAND;
+ mmu[2].val = MMUEXT_REASSIGN_PAGE;
+
+ mcl[0].op = __HYPERVISOR_update_va_mapping;
+ mcl[0].args[0] = vdata >> PAGE_SHIFT;
+ mcl[0].args[1] = (new_mfn << PAGE_SHIFT) | __PAGE_KERNEL;
+ mcl[0].args[2] = 0;
+ mcl[1].op = __HYPERVISOR_mmu_update;
+ mcl[1].args[0] = (unsigned long)mmu;
+ mcl[1].args[1] = 3;
+ mcl[1].args[2] = 0;
+
+ mcl += 2;
+ mmu += 3;
+
+ __skb_queue_tail(&rxq, skb);
+
+ /* Filled the batch queue? */
+ if ( (mcl - rx_mcl) == ARRAY_SIZE(rx_mcl) )
+ break;
+ }
+
+ if ( mcl == rx_mcl )
+ return;
+
+ mcl[-2].args[2] = UVMF_FLUSH_TLB;
+ if ( unlikely(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0) )
+ BUG();
+
+ mcl = rx_mcl;
+ mmu = rx_mmu;
+ while ( (skb = __skb_dequeue(&rxq)) != NULL )
+ {
+ netif = (netif_t *)skb->dev->priv;
+ size = skb->tail - skb->data;
+
+ /* Rederive the machine addresses. */
+ new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
+ mdata = ((mmu[2].ptr & PAGE_MASK) |
+ ((unsigned long)skb->data & ~PAGE_MASK));
+
+ atomic_set(&(skb_shinfo(skb)->dataref), 1);
+ skb_shinfo(skb)->nr_frags = 0;
+ skb_shinfo(skb)->frag_list = NULL;
+
+ netif->stats.tx_bytes += size;
+ netif->stats.tx_packets++;
+
+ /* The update_va_mapping() must not fail. */
+ if ( unlikely(mcl[0].args[5] != 0) )
+ BUG();
+
+ /* Check the reassignment error code. */
+ status = NETIF_RSP_OKAY;
+ if ( unlikely(mcl[1].args[5] != 0) )
+ {
+ DPRINTK("Failed MMU update transferring to DOM%u\n", netif->domid);
- if ( !skb_queue_empty(&rx_queue) )
++ free_mfn(mdata >> PAGE_SHIFT);
+ status = NETIF_RSP_ERROR;
+ }
+
+ evtchn = netif->evtchn;
+ id = netif->rx->ring[MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
+ if ( make_rx_response(netif, id, status, mdata, size) &&
+ (rx_notify[evtchn] == 0) )
+ {
+ rx_notify[evtchn] = 1;
+ notify_list[notify_nr++] = evtchn;
+ }
+
+ dev_kfree_skb(skb);
+
+ mcl += 2;
+ mmu += 3;
+ }
+
+ while ( notify_nr != 0 )
+ {
+ evtchn = notify_list[--notify_nr];
+ rx_notify[evtchn] = 0;
+ notify_via_evtchn(evtchn);
+ }
+
+ /* More work to do? */
++ if ( !skb_queue_empty(&rx_queue) && !timer_pending(&net_timer) )
+ tasklet_schedule(&net_rx_tasklet);
+#if 0
+ else
+ xen_network_done_notify();
+#endif
+}
+
++static void net_alarm(unsigned long unused)
++{
++ tasklet_schedule(&net_rx_tasklet);
++}
++
+struct net_device_stats *netif_be_get_stats(struct net_device *dev)
+{
+ netif_t *netif = dev->priv;
+ return &netif->stats;
+}
+
+static int __on_net_schedule_list(netif_t *netif)
+{
+ return netif->list.next != NULL;
+}
+
+static void remove_from_net_schedule_list(netif_t *netif)
+{
+ spin_lock_irq(&net_schedule_list_lock);
+ if ( likely(__on_net_schedule_list(netif)) )
+ {
+ list_del(&netif->list);
+ netif->list.next = NULL;
+ netif_put(netif);
+ }
+ spin_unlock_irq(&net_schedule_list_lock);
+}
+
+static void add_to_net_schedule_list_tail(netif_t *netif)
+{
+ if ( __on_net_schedule_list(netif) )
+ return;
+
+ spin_lock_irq(&net_schedule_list_lock);
+ if ( !__on_net_schedule_list(netif) && (netif->status == CONNECTED) )
+ {
+ list_add_tail(&netif->list, &net_schedule_list);
+ netif_get(netif);
+ }
+ spin_unlock_irq(&net_schedule_list_lock);
+}
+
+static inline void netif_schedule_work(netif_t *netif)
+{
+ if ( (netif->tx_req_cons != netif->tx->req_prod) &&
+ ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
+ {
+ add_to_net_schedule_list_tail(netif);
+ maybe_schedule_tx_action();
+ }
+}
+
+void netif_deschedule(netif_t *netif)
+{
+ remove_from_net_schedule_list(netif);
+}
+
+#if 0
+static void tx_credit_callback(unsigned long data)
+{
+ netif_t *netif = (netif_t *)data;
+ netif->remaining_credit = netif->credit_bytes;
+ netif_schedule_work(netif);
+}
+#endif
+
+static void net_tx_action(unsigned long unused)
+{
+ struct list_head *ent;
+ struct sk_buff *skb;
+ netif_t *netif;
+ netif_tx_request_t txreq;
+ u16 pending_idx;
+ NETIF_RING_IDX i;
+ multicall_entry_t *mcl;
+ PEND_RING_IDX dc, dp;
+ unsigned int data_len;
+
+ if ( (dc = dealloc_cons) == (dp = dealloc_prod) )
+ goto skip_dealloc;
+
+ mcl = tx_mcl;
+ while ( dc != dp )
+ {
+ pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
+ mcl[0].op = __HYPERVISOR_update_va_mapping;
+ mcl[0].args[0] = MMAP_VADDR(pending_idx) >> PAGE_SHIFT;
+ mcl[0].args[1] = 0;
+ mcl[0].args[2] = 0;
+ mcl++;
+ }
+
+ mcl[-1].args[2] = UVMF_FLUSH_TLB;
+ if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
+ BUG();
+
+ mcl = tx_mcl;
+ while ( dealloc_cons != dp )
+ {
+ /* The update_va_mapping() must not fail. */
+ if ( unlikely(mcl[0].args[5] != 0) )
+ BUG();
+
+ pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
+
+ netif = pending_tx_info[pending_idx].netif;
+
+ spin_lock(&netif->tx_lock);
+ make_tx_response(netif, pending_tx_info[pending_idx].req.id,
+ NETIF_RSP_OKAY);
+ spin_unlock(&netif->tx_lock);
+
+ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+
+ /*
+ * Scheduling checks must happen after the above response is posted.
+ * This avoids a possible race with a guest OS on another CPU if that
+ * guest is testing against 'resp_prod' when deciding whether to notify
+ * us when it queues additional packets.
+ */
+ mb();
+ if ( (netif->tx_req_cons != netif->tx->req_prod) &&
+ ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
+ add_to_net_schedule_list_tail(netif);
+
+ netif_put(netif);
+
+ mcl++;
+ }
+
+ skip_dealloc:
+ mcl = tx_mcl;
+ while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
+ !list_empty(&net_schedule_list) )
+ {
+ /* Get a netif from the list with work to do. */
+ ent = net_schedule_list.next;
+ netif = list_entry(ent, netif_t, list);
+ netif_get(netif);
+ remove_from_net_schedule_list(netif);
+
+ /* Work to do? */
+ i = netif->tx_req_cons;
+ if ( (i == netif->tx->req_prod) ||
+ ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE) )
+ {
+ netif_put(netif);
+ continue;
+ }
+
+ netif->tx->req_cons = ++netif->tx_req_cons;
+
+ /*
+ * 1. Ensure that we see the request when we copy it.
+ * 2. Ensure that the frontend sees the updated req_cons before we check
+ * for more work to schedule.
+ */
+ mb();
+
+ memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req,
+ sizeof(txreq));
+
+#if 0
+ /* Credit-based scheduling. */
+ if ( tx.size > netif->remaining_credit )
+ {
+ s_time_t now = NOW(), next_credit =
+ netif->credit_timeout.expires + MICROSECS(netif->credit_usec);
+ if ( next_credit <= now )
+ {
+ netif->credit_timeout.expires = now;
+ netif->remaining_credit = netif->credit_bytes;
+ }
+ else
+ {
+ netif->remaining_credit = 0;
+ netif->credit_timeout.expires = next_credit;
+ netif->credit_timeout.data = (unsigned long)netif;
+ netif->credit_timeout.function = tx_credit_callback;
+ netif->credit_timeout.cpu = smp_processor_id();
+ add_ac_timer(&netif->credit_timeout);
+ break;
+ }
+ }
+ netif->remaining_credit -= tx.size;
+#endif
+
+ netif_schedule_work(netif);
+
+ if ( unlikely(txreq.size < ETH_HLEN) ||
+ unlikely(txreq.size > ETH_FRAME_LEN) )
+ {
+ DPRINTK("Bad packet size: %d\n", txreq.size);
+ make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+ netif_put(netif);
+ continue;
+ }
+
+ /* The payload must not cross a page boundary, as it cannot fragment. */
+ if ( unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >= PAGE_SIZE) )
+ {
+ DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n",
+ txreq.addr, txreq.size,
+ (txreq.addr &~PAGE_MASK) + txreq.size);
+ make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+ netif_put(netif);
+ continue;
+ }
+
+ pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+
+ data_len = txreq.size > PKT_PROT_LEN ? PKT_PROT_LEN : txreq.size;
+
+ if ( unlikely((skb = alloc_skb(data_len+16, GFP_ATOMIC)) == NULL) )
+ {
+ DPRINTK("Can't allocate a skb in start_xmit.\n");
+ make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+ netif_put(netif);
+ break;
+ }
+
+ /* Packets passed to netif_rx() must have some headroom. */
+ skb_reserve(skb, 16);
+
+ mcl[0].op = __HYPERVISOR_update_va_mapping_otherdomain;
+ mcl[0].args[0] = MMAP_VADDR(pending_idx) >> PAGE_SHIFT;
+ mcl[0].args[1] = (txreq.addr & PAGE_MASK) | __PAGE_KERNEL;
+ mcl[0].args[2] = 0;
+ mcl[0].args[3] = netif->domid;
+ mcl++;
+
+ memcpy(&pending_tx_info[pending_idx].req, &txreq, sizeof(txreq));
+ pending_tx_info[pending_idx].netif = netif;
+ *((u16 *)skb->data) = pending_idx;
+
+ __skb_queue_tail(&tx_queue, skb);
+
+ pending_cons++;
+
+ /* Filled the batch queue? */
+ if ( (mcl - tx_mcl) == ARRAY_SIZE(tx_mcl) )
+ break;
+ }
+
+ if ( mcl == tx_mcl )
+ return;
+
+ if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
+ BUG();
+
+ mcl = tx_mcl;
+ while ( (skb = __skb_dequeue(&tx_queue)) != NULL )
+ {
+ pending_idx = *((u16 *)skb->data);
+ netif = pending_tx_info[pending_idx].netif;
+ memcpy(&txreq, &pending_tx_info[pending_idx].req, sizeof(txreq));
+
+ /* Check the remap error code. */
+ if ( unlikely(mcl[0].args[5] != 0) )
+ {
+ DPRINTK("Bad page frame\n");
+ make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+ netif_put(netif);
+ kfree_skb(skb);
+ mcl++;
+ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+ continue;
+ }
+
+ phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
+ FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
+
+ data_len = txreq.size > PKT_PROT_LEN ? PKT_PROT_LEN : txreq.size;
+
+ __skb_put(skb, data_len);
+ memcpy(skb->data,
+ (void *)(MMAP_VADDR(pending_idx)|(txreq.addr&~PAGE_MASK)),
+ data_len);
+
+ if ( data_len < txreq.size )
+ {
+ /* Append the packet payload as a fragment. */
+ skb_shinfo(skb)->frags[0].page =
+ virt_to_page(MMAP_VADDR(pending_idx));
+ skb_shinfo(skb)->frags[0].size = txreq.size - data_len;
+ skb_shinfo(skb)->frags[0].page_offset =
+ (txreq.addr + data_len) & ~PAGE_MASK;
+ skb_shinfo(skb)->nr_frags = 1;
+ }
+ else
+ {
+ skb_shinfo(skb)->frags[0].page =
+ virt_to_page(MMAP_VADDR(pending_idx));
+ skb->destructor = netif_skb_release;
+ }
+
+ skb->data_len = txreq.size - data_len;
+ skb->len += skb->data_len;
+
+ skb->dev = netif->dev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ netif->stats.rx_bytes += txreq.size;
+ netif->stats.rx_packets++;
+
+ netif_rx(skb);
+ netif->dev->last_rx = jiffies;
+
+ mcl++;
+ }
+}
+
+static void netif_idx_release(u16 pending_idx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dealloc_lock, flags);
+ dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
+ spin_unlock_irqrestore(&dealloc_lock, flags);
+
+ tasklet_schedule(&net_tx_tasklet);
+}
+
+static void netif_page_release(struct page *page)
+{
+ u16 pending_idx = page - virt_to_page(mmap_vstart);
+
+ /* Ready for next use. */
+ set_page_count(page, 1);
+
+ netif_idx_release(pending_idx);
+}
+
+static void netif_skb_release(struct sk_buff *skb)
+{
+ struct page *page = skb_shinfo(skb)->frags[0].page;
+ u16 pending_idx = page - virt_to_page(mmap_vstart);
+
+ netif_idx_release(pending_idx);
+}
+
+#if 0
+long flush_bufs_for_netif(netif_t *netif)
+{
+ NETIF_RING_IDX i;
+
+ /* Return any outstanding receive buffers to the guest OS. */
+ spin_lock(&netif->rx_lock);
+ for ( i = netif->rx_req_cons;
+ (i != netif->rx->req_prod) &&
+ ((i-netif->rx_resp_prod) != NETIF_RX_RING_SIZE);
+ i++ )
+ {
+ make_rx_response(netif,
+ netif->rx->ring[MASK_NETIF_RX_IDX(i)].req.id,
+ NETIF_RSP_DROPPED, 0, 0);
+ }
+ netif->rx_req_cons = i;
+ spin_unlock(&netif->rx_lock);
+
+ /*
+ * Flush pending transmit buffers. The guest may still have to wait for
+ * buffers that are queued at a physical NIC.
+ */
+ spin_lock(&netif->tx_lock);
+ for ( i = netif->tx_req_cons;
+ (i != netif->tx->req_prod) &&
+ ((i-netif->tx_resp_prod) != NETIF_TX_RING_SIZE);
+ i++ )
+ {
+ make_tx_response(netif,
+ netif->tx->ring[MASK_NETIF_TX_IDX(i)].req.id,
+ NETIF_RSP_DROPPED);
+ }
+ netif->tx_req_cons = i;
+ spin_unlock(&netif->tx_lock);
+
+ return 0;
+}
+#endif
+
+irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
+{
+ netif_t *netif = dev_id;
+ if ( tx_work_exists(netif) )
+ {
+ add_to_net_schedule_list_tail(netif);
+ maybe_schedule_tx_action();
+ }
+ return IRQ_HANDLED;
+}
+
+static void make_tx_response(netif_t *netif,
+ u16 id,
+ s8 st)
+{
+ NETIF_RING_IDX i = netif->tx_resp_prod;
+ netif_tx_response_t *resp;
+
+ resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp;
+ resp->id = id;
+ resp->status = st;
+ wmb();
+ netif->tx->resp_prod = netif->tx_resp_prod = ++i;
+
+ mb(); /* Update producer before checking event threshold. */
+ if ( i == netif->tx->event )
+ notify_via_evtchn(netif->evtchn);
+}
+
+static int make_rx_response(netif_t *netif,
+ u16 id,
+ s8 st,
+ memory_t addr,
+ u16 size)
+{
+ NETIF_RING_IDX i = netif->rx_resp_prod;
+ netif_rx_response_t *resp;
+
+ resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
+ resp->addr = addr;
+ resp->id = id;
+ resp->status = (s16)size;
+ if ( st < 0 )
+ resp->status = (s16)st;
+ wmb();
+ netif->rx->resp_prod = netif->rx_resp_prod = ++i;
+
+ mb(); /* Update producer before checking event threshold. */
+ return (i == netif->rx->event);
+}
+
+static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct list_head *ent;
+ netif_t *netif;
+ int i = 0;
+
+ printk(KERN_ALERT "netif_schedule_list:\n");
+ spin_lock_irq(&net_schedule_list_lock);
+
+ list_for_each ( ent, &net_schedule_list )
+ {
+ netif = list_entry(ent, netif_t, list);
+ printk(KERN_ALERT " %d: private(rx_req_cons=%08x rx_resp_prod=%08x\n",
+ i, netif->rx_req_cons, netif->rx_resp_prod);
+ printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n",
+ netif->tx_req_cons, netif->tx_resp_prod);
+ printk(KERN_ALERT " shared(rx_req_prod=%08x rx_resp_prod=%08x\n",
+ netif->rx->req_prod, netif->rx->resp_prod);
+ printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n",
+ netif->rx->event, netif->tx->req_prod);
+ printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n",
+ netif->tx->resp_prod, netif->tx->event);
+ i++;
+ }
+
+ spin_unlock_irq(&net_schedule_list_lock);
+ printk(KERN_ALERT " ** End of netif_schedule_list **\n");
+
+ return IRQ_HANDLED;
+}
+
+static int __init netback_init(void)
+{
+ int i;
+ struct page *page;
+
+ if ( !(xen_start_info.flags & SIF_NET_BE_DOMAIN) &&
+ !(xen_start_info.flags & SIF_INITDOMAIN) )
+ return 0;
+
+ printk("Initialising Xen netif backend\n");
+
++ /* We can increase reservation by this much in net_rx_action(). */
++ balloon_update_driver_allowance(NETIF_RX_RING_SIZE);
++
+ skb_queue_head_init(&rx_queue);
+ skb_queue_head_init(&tx_queue);
+
++ init_timer(&net_timer);
++ net_timer.data = 0;
++ net_timer.function = net_alarm;
++
+ netif_interface_init();
+
+ if ( (mmap_vstart = allocate_empty_lowmem_region(MAX_PENDING_REQS)) == 0 )
+ BUG();
+
+ for ( i = 0; i < MAX_PENDING_REQS; i++ )
+ {
+ page = virt_to_page(MMAP_VADDR(i));
+ set_page_count(page, 1);
+ SetPageForeign(page, netif_page_release);
+ }
+
+ pending_cons = 0;
+ pending_prod = MAX_PENDING_REQS;
+ for ( i = 0; i < MAX_PENDING_REQS; i++ )
+ pending_ring[i] = i;
+
+ spin_lock_init(&net_schedule_list_lock);
+ INIT_LIST_HEAD(&net_schedule_list);
+
+ netif_ctrlif_init();
+
+ (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG),
+ netif_be_dbg, SA_SHIRQ,
+ "net-be-dbg", &netif_be_dbg);
+
+ return 0;
+}
+
+static void netback_cleanup(void)
+{
+ BUG();
+}
+
+module_init(netback_init);
+module_exit(netback_cleanup);
--- /dev/null
- * Whatever - print an error and queue the id again straight away.
+/******************************************************************************
+ * Virtual network driver for conversing with remote driver backends.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <net/sock.h>
+#include <net/pkt_sched.h>
+#include <asm/io.h>
+#include <asm-xen/evtchn.h>
+#include <asm-xen/ctrl_if.h>
+#include <asm-xen/xen-public/io/netif.h>
++#include <asm-xen/balloon.h>
+#include <asm/page.h>
+
+#include <net/arp.h>
+#include <net/route.h>
+
+#define DEBUG 0
+
+#ifndef __GFP_NOWARN
+#define __GFP_NOWARN 0
+#endif
+#define alloc_xen_skb(_l) __dev_alloc_skb((_l), GFP_ATOMIC|__GFP_NOWARN)
+
+#define init_skb_shinfo(_skb) \
+ do { \
+ atomic_set(&(skb_shinfo(_skb)->dataref), 1); \
+ skb_shinfo(_skb)->nr_frags = 0; \
+ skb_shinfo(_skb)->frag_list = NULL; \
+ } while ( 0 )
+
+/* Allow headroom on each rx pkt for Ethernet header, alignment padding, ... */
+#define RX_HEADROOM 200
+
+/*
+ * If the backend driver is pipelining transmit requests then we can be very
+ * aggressive in avoiding new-packet notifications -- only need to send a
+ * notification if there are no outstanding unreceived responses.
+ * If the backend may be buffering our transmit buffers for any reason then we
+ * are rather more conservative.
+ */
+#ifdef CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
+#define TX_TEST_IDX resp_prod /* aggressive: any outstanding responses? */
+#else
+#define TX_TEST_IDX req_cons /* conservative: not seen all our requests? */
+#endif
+
+static void network_tx_buf_gc(struct net_device *dev);
+static void network_alloc_rx_buffers(struct net_device *dev);
+
+static unsigned long rx_pfn_array[NETIF_RX_RING_SIZE];
+static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE+1];
+static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
+
+static struct list_head dev_list;
+
+struct net_private
+{
+ struct list_head list;
+ struct net_device *dev;
+
+ struct net_device_stats stats;
+ NETIF_RING_IDX rx_resp_cons, tx_resp_cons;
+ unsigned int tx_full;
+
+ netif_tx_interface_t *tx;
+ netif_rx_interface_t *rx;
+
+ spinlock_t tx_lock;
+ spinlock_t rx_lock;
+
+ unsigned int handle;
+ unsigned int evtchn;
+ unsigned int irq;
+
+ /* What is the status of our connection to the remote backend? */
+#define BEST_CLOSED 0
+#define BEST_DISCONNECTED 1
+#define BEST_CONNECTED 2
+ unsigned int backend_state;
+
+ /* Is this interface open or closed (down or up)? */
+#define UST_CLOSED 0
+#define UST_OPEN 1
+ unsigned int user_state;
+
+ /* Receive-ring batched refills. */
+#define RX_MIN_TARGET 8
+#define RX_MAX_TARGET NETIF_RX_RING_SIZE
+ int rx_target;
+ struct sk_buff_head rx_batch;
+
+ /*
+ * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
+ * array is an index into a chain of free entries.
+ */
+ struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1];
+ struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1];
+};
+
+/* Access macros for acquiring and freeing slots in {tx,rx}_skbs[]. */
+#define ADD_ID_TO_FREELIST(_list, _id) \
+ (_list)[(_id)] = (_list)[0]; \
+ (_list)[0] = (void *)(unsigned long)(_id);
+#define GET_ID_FROM_FREELIST(_list) \
+ ({ unsigned long _id = (unsigned long)(_list)[0]; \
+ (_list)[0] = (_list)[_id]; \
+ (unsigned short)_id; })
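Slot 0 of each array heads the free chain, and each free slot stores the index of the next free slot cast to a pointer; since genuine skb pointers are never small integers, one array serves both purposes. A short sketch of the discipline, with 'skb' standing in for a real buffer:

    struct sk_buff *skbs[NETIF_TX_RING_SIZE+1];
    struct sk_buff *skb = /* some freshly allocated skb */ NULL;
    unsigned short id;
    int i;

    for ( i = 0; i <= NETIF_TX_RING_SIZE; i++ )  /* chain every slot      */
        skbs[i] = (void *)(unsigned long)(i+1);

    id = GET_ID_FROM_FREELIST(skbs);             /* pop: id == 1          */
    skbs[id] = skb;                              /* slot holds a live skb */
    /* ... when the response for 'id' arrives ... */
    ADD_ID_TO_FREELIST(skbs, id);                /* chain the slot again  */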
+
+static char *status_name[] = {
+ [NETIF_INTERFACE_STATUS_CLOSED] = "closed",
+ [NETIF_INTERFACE_STATUS_DISCONNECTED] = "disconnected",
+ [NETIF_INTERFACE_STATUS_CONNECTED] = "connected",
+ [NETIF_INTERFACE_STATUS_CHANGED] = "changed",
+};
+
+static char *be_state_name[] = {
+ [BEST_CLOSED] = "closed",
+ [BEST_DISCONNECTED] = "disconnected",
+ [BEST_CONNECTED] = "connected",
+};
+
+#if DEBUG
+#define DPRINTK(fmt, args...) \
+ printk(KERN_ALERT "xen_net (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
+#else
+#define DPRINTK(fmt, args...) ((void)0)
+#endif
+#define IPRINTK(fmt, args...) \
+ printk(KERN_INFO "xen_net: " fmt, ##args)
+#define WPRINTK(fmt, args...) \
+ printk(KERN_WARNING "xen_net: " fmt, ##args)
+
+static struct net_device *find_dev_by_handle(unsigned int handle)
+{
+ struct list_head *ent;
+ struct net_private *np;
+ list_for_each ( ent, &dev_list )
+ {
+ np = list_entry(ent, struct net_private, list);
+ if ( np->handle == handle )
+ return np->dev;
+ }
+ return NULL;
+}
+
+/** Network interface info. */
+struct netif_ctrl {
+ /** Number of interfaces. */
+ int interface_n;
+ /** Number of connected interfaces. */
+ int connected_n;
+ /** Error code. */
+ int err;
+ /** Driver status: NETIF_DRIVER_STATUS_UP or NETIF_DRIVER_STATUS_DOWN. */
+ int up;
+};
+
+static struct netif_ctrl netctrl;
+
+static void netctrl_init(void)
+{
+ memset(&netctrl, 0, sizeof(netctrl));
+ netctrl.up = NETIF_DRIVER_STATUS_DOWN;
+}
+
+/** Record the first network interface error, and return the current error.
+ */
+static int netctrl_err(int err)
+{
+ if ( (err < 0) && !netctrl.err )
+ netctrl.err = err;
+ return netctrl.err;
+}
+
+/** Test if all network interfaces are connected.
+ *
+ * @return 1 if all connected, 0 if not, negative error code otherwise
+ */
+static int netctrl_connected(void)
+{
+ int ok;
+
+ if ( netctrl.err )
+ ok = netctrl.err;
+ else if ( netctrl.up == NETIF_DRIVER_STATUS_UP )
+ ok = (netctrl.connected_n == netctrl.interface_n);
+ else
+ ok = 0;
+
+ return ok;
+}
+
+/** Count the connected network interfaces.
+ *
+ * @return connected count
+ */
+static int netctrl_connected_count(void)
+{
+ struct list_head *ent;
+ struct net_private *np;
+ unsigned int connected;
+
+ connected = 0;
+
+ list_for_each ( ent, &dev_list )
+ {
+ np = list_entry(ent, struct net_private, list);
+ if ( np->backend_state == BEST_CONNECTED )
+ connected++;
+ }
+
+ netctrl.connected_n = connected;
+ DPRINTK("> connected_n=%d interface_n=%d\n",
+ netctrl.connected_n, netctrl.interface_n);
+ return connected;
+}
+
+/** Send a packet on a net device to encourage switches to learn the
+ * MAC. We send a gratuitous ARP reply.
+ *
+ * @param dev device
+ * @return 0 on success, error code otherwise
+ */
+static int vif_wake(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ u32 src_ip, dst_ip;
+
+ dst_ip = INADDR_BROADCAST;
+ src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
+
+ skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
+ dst_ip, dev, src_ip,
+ /*dst_hw*/ NULL, /*src_hw*/ NULL,
+ /*target_hw*/ dev->dev_addr);
+ if ( skb == NULL )
+ return -ENOMEM;
+
+ return dev_queue_xmit(skb);
+}
+
+static int network_open(struct net_device *dev)
+{
+ struct net_private *np = dev->priv;
+
+ memset(&np->stats, 0, sizeof(np->stats));
+
+ np->user_state = UST_OPEN;
+
+ network_alloc_rx_buffers(dev);
+ np->rx->event = np->rx_resp_cons + 1;
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static void network_tx_buf_gc(struct net_device *dev)
+{
+ NETIF_RING_IDX i, prod;
+ unsigned short id;
+ struct net_private *np = dev->priv;
+ struct sk_buff *skb;
+
+ if ( np->backend_state != BEST_CONNECTED )
+ return;
+
+ do {
+ prod = np->tx->resp_prod;
+ rmb(); /* Ensure we see responses up to 'prod'. */
+
+ for ( i = np->tx_resp_cons; i != prod; i++ )
+ {
+ id = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
+ skb = np->tx_skbs[id];
+ ADD_ID_TO_FREELIST(np->tx_skbs, id);
+ dev_kfree_skb_irq(skb);
+ }
+
+ np->tx_resp_cons = prod;
+
+ /*
+ * Set a new event, then check for race with update of tx_cons. Note
+ * that it is essential to schedule a callback, no matter how few
+ * buffers are pending. Even if there is space in the transmit ring,
+ * higher layers may be blocked because too much data is outstanding:
+ * in such cases notification from Xen is likely to be the only kick
+ * that we'll get.
+ */
+ np->tx->event =
+ prod + ((np->tx->req_prod - prod) >> 1) + 1;
+ mb();
+ }
+ while ( prod != np->tx->resp_prod );
+
+ if ( np->tx_full &&
+ ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE) )
+ {
+ np->tx_full = 0;
+ if ( np->user_state == UST_OPEN )
+ netif_wake_queue(dev);
+ }
+}
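The event calculation above asks for the next notification roughly halfway through the responses still outstanding, amortising interrupts without ever waiting forever. A worked example with illustrative index values:

    NETIF_RING_IDX prod     = 10;   /* responses consumed so far */
    NETIF_RING_IDX req_prod = 18;   /* requests published so far */
    NETIF_RING_IDX event    = prod + ((req_prod - prod) >> 1) + 1;
    /* event == 15: the backend notifies once resp_prod reaches 15,
     * i.e. after about half of the 8 outstanding requests complete. */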
+
+
+static void network_alloc_rx_buffers(struct net_device *dev)
+{
+ unsigned short id;
+ struct net_private *np = dev->priv;
+ struct sk_buff *skb;
+ int i, batch_target;
+ NETIF_RING_IDX req_prod = np->rx->req_prod;
+
+ if ( unlikely(np->backend_state != BEST_CONNECTED) )
+ return;
+
+ /*
+ * Allocate skbuffs greedily, even though we batch updates to the
+ * receive ring. This creates a less bursty demand on the memory allocator,
+ * so should reduce the chance of failed allocation requests both for
+ * ourselves and for other kernel subsystems.
+ */
+ batch_target = np->rx_target - (req_prod - np->rx_resp_cons);
+ for ( i = skb_queue_len(&np->rx_batch); i < batch_target; i++ )
+ {
+ if ( unlikely((skb = alloc_xen_skb(dev->mtu + RX_HEADROOM)) == NULL) )
+ break;
+ __skb_queue_tail(&np->rx_batch, skb);
+ }
+
+ /* Is the batch large enough to be worthwhile? */
+ if ( i < (np->rx_target/2) )
+ return;
+
+ for ( i = 0; ; i++ )
+ {
+ if ( (skb = __skb_dequeue(&np->rx_batch)) == NULL )
+ break;
+
+ skb->dev = dev;
+
+ id = GET_ID_FROM_FREELIST(np->rx_skbs);
+
+ np->rx_skbs[id] = skb;
+
+ np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
+
+ rx_pfn_array[i] = virt_to_machine(skb->head) >> PAGE_SHIFT;
+
+ /* Remove this page from pseudo phys map before passing back to Xen. */
+ phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT]
+ = INVALID_P2M_ENTRY;
+
+ rx_mcl[i].op = __HYPERVISOR_update_va_mapping;
+ rx_mcl[i].args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
+ rx_mcl[i].args[1] = 0;
+ rx_mcl[i].args[2] = 0;
+ }
+
+ /*
+ * We may have allocated buffers which have entries outstanding in the page
+ * update queue -- make sure we flush those first!
+ */
+ flush_page_update_queue();
+
+ /* After all PTEs have been zapped we blow away stale TLB entries. */
+ rx_mcl[i-1].args[2] = UVMF_FLUSH_TLB;
+
+ /* Give away a batch of pages. */
+ rx_mcl[i].op = __HYPERVISOR_dom_mem_op;
+ rx_mcl[i].args[0] = MEMOP_decrease_reservation;
+ rx_mcl[i].args[1] = (unsigned long)rx_pfn_array;
+ rx_mcl[i].args[2] = (unsigned long)i;
+ rx_mcl[i].args[3] = 0;
+ rx_mcl[i].args[4] = DOMID_SELF;
+
++ /* Tell the balloon driver what is going on. */
++ balloon_update_driver_allowance(i);
++
+ /* Zap PTEs and give away pages in one big multicall. */
+ (void)HYPERVISOR_multicall(rx_mcl, i+1);
+
+ /* Check return status of HYPERVISOR_dom_mem_op(). */
+ if ( unlikely(rx_mcl[i].args[5] != i) )
+ panic("Unable to reduce memory reservation\n");
+
+ /* Above is a suitable barrier to ensure backend will see requests. */
+ np->rx->req_prod = req_prod + i;
+
+ /* Adjust our floating fill target if we risked running out of buffers. */
+ if ( ((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
+ ((np->rx_target *= 2) > RX_MAX_TARGET) )
+ np->rx_target = RX_MAX_TARGET;
+}
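Together with the linear decrease at the end of netif_poll(), this forms an exponential-increase/linear-decrease controller for the fill target. A condensed sketch of the policy as a hypothetical helper, assuming RX_MIN_TARGET of 8 and a ring size of 256:

    static int adapt_rx_target(int rx_target, int outstanding)
    {
        if ( outstanding < (rx_target / 4) )    /* nearly ran dry: grow fast */
        {
            if ( (rx_target *= 2) > 256 )       /* clamp at RX_MAX_TARGET    */
                rx_target = 256;
        }
        else if ( outstanding > ((3 * rx_target) / 4) ) /* quiet: shed one   */
        {
            if ( --rx_target < 8 )              /* clamp at RX_MIN_TARGET    */
                rx_target = 8;
        }
        return rx_target;
    }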
+
+
+static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned short id;
+ struct net_private *np = (struct net_private *)dev->priv;
+ netif_tx_request_t *tx;
+ NETIF_RING_IDX i;
+
+ if ( unlikely(np->tx_full) )
+ {
+ printk(KERN_ALERT "%s: full queue wasn't stopped!\n", dev->name);
+ netif_stop_queue(dev);
+ goto drop;
+ }
+
+ if ( unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
+ PAGE_SIZE) )
+ {
+ struct sk_buff *nskb;
+ if ( unlikely((nskb = alloc_xen_skb(skb->len)) == NULL) )
+ goto drop;
+ skb_put(nskb, skb->len);
+ memcpy(nskb->data, skb->data, skb->len);
+ nskb->dev = skb->dev;
+ dev_kfree_skb(skb);
+ skb = nskb;
+ }
+
+ spin_lock_irq(&np->tx_lock);
+
+ if ( np->backend_state != BEST_CONNECTED )
+ {
+ spin_unlock_irq(&np->tx_lock);
+ goto drop;
+ }
+
+ i = np->tx->req_prod;
+
+ id = GET_ID_FROM_FREELIST(np->tx_skbs);
+ np->tx_skbs[id] = skb;
+
+ tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
+
+ tx->id = id;
+ tx->addr = virt_to_machine(skb->data);
+ tx->size = skb->len;
+
+ wmb(); /* Ensure that backend will see the request. */
+ np->tx->req_prod = i + 1;
+
+ network_tx_buf_gc(dev);
+
+ if ( (i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1) )
+ {
+ np->tx_full = 1;
+ netif_stop_queue(dev);
+ }
+
+ spin_unlock_irq(&np->tx_lock);
+
+ np->stats.tx_bytes += skb->len;
+ np->stats.tx_packets++;
+
+ /* Only notify Xen if we really have to. */
+ mb();
+ if ( np->tx->TX_TEST_IDX == i )
+ notify_via_evtchn(np->evtchn);
+
+ return 0;
+
+ drop:
+ np->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+
+static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
+{
+ struct net_device *dev = dev_id;
+ struct net_private *np = dev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&np->tx_lock, flags);
+ network_tx_buf_gc(dev);
+ spin_unlock_irqrestore(&np->tx_lock, flags);
+
+ if ( (np->rx_resp_cons != np->rx->resp_prod) &&
+ (np->user_state == UST_OPEN) )
+ netif_rx_schedule(dev);
+
+ return IRQ_HANDLED;
+}
+
+
+static int netif_poll(struct net_device *dev, int *pbudget)
+{
+ struct net_private *np = dev->priv;
+ struct sk_buff *skb, *nskb;
+ netif_rx_response_t *rx;
+ NETIF_RING_IDX i, rp;
+ mmu_update_t *mmu = rx_mmu;
+ multicall_entry_t *mcl = rx_mcl;
+ int work_done, budget, more_to_do = 1;
+ struct sk_buff_head rxq;
+ unsigned long flags;
+
+ spin_lock(&np->rx_lock);
+
+ if ( np->backend_state != BEST_CONNECTED )
+ {
+ spin_unlock(&np->rx_lock);
+ return 0;
+ }
+
+ skb_queue_head_init(&rxq);
+
+ if ( (budget = *pbudget) > dev->quota )
+ budget = dev->quota;
+
+ rp = np->rx->resp_prod;
+ rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+ for ( i = np->rx_resp_cons, work_done = 0;
+ (i != rp) && (work_done < budget);
+ i++, work_done++ )
+ {
+ rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
+
+ /*
+ * An error here is very odd. Usually indicates a backend bug,
+ * low-memory condition, or that we didn't have reservation headroom.
- printk(KERN_ALERT "bad buffer on RX ring!(%d)\n", rx->status);
+ */
+ if ( unlikely(rx->status <= 0) )
+ {
++ if ( net_ratelimit() )
++ printk(KERN_WARNING "Bad rx buffer (memory squeeze?).\n");
+ np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].req.id = rx->id;
+ wmb();
+ np->rx->req_prod++;
++ work_done--;
+ continue;
+ }
+
+ skb = np->rx_skbs[rx->id];
+ ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
+
+ /* NB. We handle skb overflow later. */
+ skb->data = skb->head + (rx->addr & ~PAGE_MASK);
+ skb->len = rx->status;
+ skb->tail = skb->data + skb->len;
+
+ np->stats.rx_packets++;
+ np->stats.rx_bytes += rx->status;
+
+ /* Remap the page. */
+ mmu->ptr = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
+ mmu->val = __pa(skb->head) >> PAGE_SHIFT;
+ mmu++;
+ mcl->op = __HYPERVISOR_update_va_mapping;
+ mcl->args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
+ mcl->args[1] = (rx->addr & PAGE_MASK) | __PAGE_KERNEL;
+ mcl->args[2] = 0;
+ mcl++;
+
+ phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] =
+ rx->addr >> PAGE_SHIFT;
+
+ __skb_queue_tail(&rxq, skb);
+ }
+
++ /* Some pages are no longer absent... */
++ balloon_update_driver_allowance(-work_done);
++
+ /* Do all the remapping work, and M->P updates, in one big hypercall. */
+ if ( likely((mcl - rx_mcl) != 0) )
+ {
+ mcl->op = __HYPERVISOR_mmu_update;
+ mcl->args[0] = (unsigned long)rx_mmu;
+ mcl->args[1] = mmu - rx_mmu;
+ mcl->args[2] = 0;
+ mcl++;
+ (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
+ }
+
+ while ( (skb = __skb_dequeue(&rxq)) != NULL )
+ {
+ /*
+ * Enough room in skbuff for the data we were passed? Also, Linux
+ * expects at least 16 bytes headroom in each receive buffer.
+ */
+ if ( unlikely(skb->tail > skb->end) ||
+ unlikely((skb->data - skb->head) < 16) )
+ {
+ nskb = NULL;
+
+ /* Only copy the packet if it fits in the current MTU. */
+ if ( skb->len <= (dev->mtu + ETH_HLEN) )
+ {
+ if ( (skb->tail > skb->end) && net_ratelimit() )
+ printk(KERN_INFO "Received packet needs %d bytes more "
+ "headroom.\n", skb->tail - skb->end);
+
+ if ( (nskb = alloc_xen_skb(skb->len + 2)) != NULL )
+ {
+ skb_reserve(nskb, 2);
+ skb_put(nskb, skb->len);
+ memcpy(nskb->data, skb->data, skb->len);
+ nskb->dev = skb->dev;
+ }
+ }
+ else if ( net_ratelimit() )
+ printk(KERN_INFO "Received packet too big for MTU "
+ "(%d > %d)\n", skb->len - ETH_HLEN, dev->mtu);
+
+ /* Reinitialise and then destroy the old skbuff. */
+ skb->len = 0;
+ skb->tail = skb->data;
+ init_skb_shinfo(skb);
+ dev_kfree_skb(skb);
+
+ /* Switch old for new, if we copied the buffer. */
+ if ( (skb = nskb) == NULL )
+ continue;
+ }
+
+ /* Set the shared-info area, which is hidden behind the real data. */
+ init_skb_shinfo(skb);
+
+ /* Ethernet-specific work. Delayed to here as it peeks the header. */
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* Pass it up. */
+ netif_receive_skb(skb);
+ dev->last_rx = jiffies;
+ }
+
+ np->rx_resp_cons = i;
+
+ /* If we get a callback with very few responses, reduce fill target. */
+ /* NB. Note exponential increase, linear decrease. */
+ if ( ((np->rx->req_prod - np->rx->resp_prod) > ((3*np->rx_target) / 4)) &&
+ (--np->rx_target < RX_MIN_TARGET) )
+ np->rx_target = RX_MIN_TARGET;
+
+ network_alloc_rx_buffers(dev);
+
+ *pbudget -= work_done;
+ dev->quota -= work_done;
+
+ if ( work_done < budget )
+ {
+ local_irq_save(flags);
+
+ np->rx->event = i + 1;
+
+ /* Deal with hypervisor racing our resetting of rx_event. */
+ mb();
+ if ( np->rx->resp_prod == i )
+ {
+ __netif_rx_complete(dev);
+ more_to_do = 0;
+ }
+
+ local_irq_restore(flags);
+ }
+
+ spin_unlock(&np->rx_lock);
+
+ return more_to_do;
+}
+
+
+static int network_close(struct net_device *dev)
+{
+ struct net_private *np = dev->priv;
+ np->user_state = UST_CLOSED;
+ netif_stop_queue(np->dev);
+ return 0;
+}
+
+
+static struct net_device_stats *network_get_stats(struct net_device *dev)
+{
+ struct net_private *np = (struct net_private *)dev->priv;
+ return &np->stats;
+}
+
+
+static void network_connect(struct net_device *dev,
+ netif_fe_interface_status_t *status)
+{
+ struct net_private *np;
+ int i, requeue_idx;
+ netif_tx_request_t *tx;
+
+ np = dev->priv;
+ spin_lock_irq(&np->tx_lock);
+ spin_lock(&np->rx_lock);
+
+ /* Recovery procedure: */
+
+ /* Step 1: Reinitialise variables. */
+ np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
+ np->rx->event = np->tx->event = 1;
+
+ /* Step 2: Rebuild the RX and TX ring contents.
+ * NB. We could just free the queued TX packets now but we hope
+ * that sending them out might do some good. We have to rebuild
+ * the RX ring because some of our pages are currently flipped out
+ * so we can't just free the RX skbs.
+ * NB2. Freelist index entries are always going to be less than
+ * __PAGE_OFFSET, whereas pointers to skbs will always be greater than
+ * or equal to __PAGE_OFFSET: we use this property to distinguish
+ * them.
+ */
+
+ /* Rebuild the TX buffer freelist and the TX ring itself.
+ * NB. This reorders packets. We could keep more private state
+ * to avoid this but maybe it doesn't matter so much given the
+ * interface has been down.
+ */
+ for ( requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++ )
+ {
+ if ( (unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET )
+ {
+ struct sk_buff *skb = np->tx_skbs[i];
+
+ tx = &np->tx->ring[requeue_idx++].req;
+
+ tx->id = i;
+ tx->addr = virt_to_machine(skb->data);
+ tx->size = skb->len;
+
+ np->stats.tx_bytes += skb->len;
+ np->stats.tx_packets++;
+ }
+ }
+ wmb();
+ np->tx->req_prod = requeue_idx;
+
+ /* Rebuild the RX buffer freelist and the RX ring itself. */
+ for ( requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++ )
+ if ( (unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET )
+ np->rx->ring[requeue_idx++].req.id = i;
+ wmb();
+ np->rx->req_prod = requeue_idx;
+
+ /* Step 3: All public and private state should now be sane. Get
+ * ready to start sending and receiving packets and give the driver
+ * domain a kick because we've probably just requeued some
+ * packets.
+ */
+ np->backend_state = BEST_CONNECTED;
+ wmb();
+ notify_via_evtchn(status->evtchn);
+ network_tx_buf_gc(dev);
+
+ if ( np->user_state == UST_OPEN )
+ netif_start_queue(dev);
+
+ spin_unlock(&np->rx_lock);
+ spin_unlock_irq(&np->tx_lock);
+}
+
+static void vif_show(struct net_private *np)
+{
+#if DEBUG
+ if (np) {
+ IPRINTK("<vif handle=%u %s(%s) evtchn=%u irq=%u tx=%p rx=%p>\n",
+ np->handle,
+ be_state_name[np->backend_state],
+ np->user_state ? "open" : "closed",
+ np->evtchn,
+ np->irq,
+ np->tx,
+ np->rx);
+ } else {
+ IPRINTK("<vif NULL>\n");
+ }
+#endif
+}
+
+/* Send a connect message to xend to tell it to bring up the interface. */
+static void send_interface_connect(struct net_private *np)
+{
+ ctrl_msg_t cmsg = {
+ .type = CMSG_NETIF_FE,
+ .subtype = CMSG_NETIF_FE_INTERFACE_CONNECT,
+ .length = sizeof(netif_fe_interface_connect_t),
+ };
+ netif_fe_interface_connect_t *msg = (void*)cmsg.msg;
+
+ DPRINTK(">\n"); vif_show(np);
+ msg->handle = np->handle;
+ msg->tx_shmem_frame = (virt_to_machine(np->tx) >> PAGE_SHIFT);
+ msg->rx_shmem_frame = (virt_to_machine(np->rx) >> PAGE_SHIFT);
+
+ ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+ DPRINTK("<\n");
+}
+
+/* Send a driver status notification to the domain controller. */
+static int send_driver_status(int ok)
+{
+ int err = 0;
+ ctrl_msg_t cmsg = {
+ .type = CMSG_NETIF_FE,
+ .subtype = CMSG_NETIF_FE_DRIVER_STATUS,
+ .length = sizeof(netif_fe_driver_status_t),
+ };
+ netif_fe_driver_status_t *msg = (void*)cmsg.msg;
+
+ msg->status = (ok ? NETIF_DRIVER_STATUS_UP : NETIF_DRIVER_STATUS_DOWN);
+ err = ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+ return err;
+}
+
+/* Stop network device and free tx/rx queues and irq.
+ */
+static void vif_release(struct net_private *np)
+{
+ /* Stop old i/f to prevent errors whilst we rebuild the state. */
+ spin_lock_irq(&np->tx_lock);
+ spin_lock(&np->rx_lock);
+ netif_stop_queue(np->dev);
+ /* np->backend_state = BEST_DISCONNECTED; */
+ spin_unlock(&np->rx_lock);
+ spin_unlock_irq(&np->tx_lock);
+
+ /* Free resources. */
+ if ( np->tx != NULL )
+ {
+ free_irq(np->irq, np->dev);
+ unbind_evtchn_from_irq(np->evtchn);
+ free_page((unsigned long)np->tx);
+ free_page((unsigned long)np->rx);
+ np->irq = 0;
+ np->evtchn = 0;
+ np->tx = NULL;
+ np->rx = NULL;
+ }
+}
+
+/* Release vif resources and close it down completely.
+ */
+static void vif_close(struct net_private *np)
+{
+ DPRINTK(">\n"); vif_show(np);
+ WPRINTK("Unexpected netif-CLOSED message in state %s\n",
+ be_state_name[np->backend_state]);
+ vif_release(np);
+ np->backend_state = BEST_CLOSED;
+ /* todo: take dev down and free. */
+ vif_show(np); DPRINTK("<\n");
+}
+
+/* Move the vif into disconnected state.
+ * Allocates tx/rx pages.
+ * Sends connect message to xend.
+ */
+static void vif_disconnect(struct net_private *np)
+{
+ DPRINTK(">\n");
+ /* np->tx and np->rx should already be NULL here; free defensively. */
+ if ( np->tx ) free_page((unsigned long)np->tx);
+ if ( np->rx ) free_page((unsigned long)np->rx);
+ np->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL);
+ np->rx = (netif_rx_interface_t *)__get_free_page(GFP_KERNEL);
+ memset(np->tx, 0, PAGE_SIZE);
+ memset(np->rx, 0, PAGE_SIZE);
+ np->backend_state = BEST_DISCONNECTED;
+ send_interface_connect(np);
+ vif_show(np); DPRINTK("<\n");
+}
+
+/* Begin interface recovery.
+ *
+ * NB. Whilst we're recovering, we turn the carrier state off. We
+ * take measures to ensure that this device isn't used for
+ * anything. We also stop the queue for this device. Various
+ * different approaches (e.g. continuing to buffer packets) have
+ * been tested but don't appear to improve the overall impact on
+ * TCP connections.
+ *
+ * TODO: (MAW) Change the Xend<->Guest protocol so that a recovery
+ * is initiated by a special "RESET" message - disconnect could
+ * just mean we're not allowed to use this interface any more.
+ */
+static void
+vif_reset(
+ struct net_private *np)
+{
+ DPRINTK(">\n");
+ IPRINTK("Attempting to reconnect network interface: handle=%u\n",
+ np->handle);
+ vif_release(np);
+ vif_disconnect(np);
+ vif_show(np); DPRINTK("<\n");
+}
+
+/* Move the vif into connected state.
+ * Sets the mac and event channel from the message.
+ * Binds the irq to the event channel.
+ */
+static void
+vif_connect(
+ struct net_private *np, netif_fe_interface_status_t *status)
+{
+ struct net_device *dev = np->dev;
+ DPRINTK(">\n");
+ memcpy(dev->dev_addr, status->mac, ETH_ALEN);
+ network_connect(dev, status);
+ np->evtchn = status->evtchn;
+ np->irq = bind_evtchn_to_irq(np->evtchn);
+ (void)request_irq(np->irq, netif_int, SA_SAMPLE_RANDOM, dev->name, dev);
+ netctrl_connected_count();
+ vif_wake(dev);
+ vif_show(np); DPRINTK("<\n");
+}
+
+
+/** Create a network device.
+ * @param handle device handle
+ * @param val return parameter for created device
+ * @return 0 on success, error code otherwise
+ */
+static int create_netdev(int handle, struct net_device **val)
+{
+ int i, err = 0;
+ struct net_device *dev = NULL;
+ struct net_private *np = NULL;
+
+ if ( (dev = alloc_etherdev(sizeof(struct net_private))) == NULL )
+ {
+ printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__);
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ np = dev->priv;
+ np->backend_state = BEST_CLOSED;
+ np->user_state = UST_CLOSED;
+ np->handle = handle;
+
+ spin_lock_init(&np->tx_lock);
+ spin_lock_init(&np->rx_lock);
+
+ skb_queue_head_init(&np->rx_batch);
+ np->rx_target = RX_MIN_TARGET;
+
+ /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
+ for ( i = 0; i <= NETIF_TX_RING_SIZE; i++ )
+ np->tx_skbs[i] = (void *)(i+1);
+ for ( i = 0; i <= NETIF_RX_RING_SIZE; i++ )
+ np->rx_skbs[i] = (void *)(i+1);
+
+ dev->open = network_open;
+ dev->hard_start_xmit = network_start_xmit;
+ dev->stop = network_close;
+ dev->get_stats = network_get_stats;
+ dev->poll = netif_poll;
+ dev->weight = 64;
+
+ if ( (err = register_netdev(dev)) != 0 )
+ {
+ printk(KERN_WARNING "%s> register_netdev err=%d\n", __FUNCTION__, err);
+ goto exit;
+ }
+ np->dev = dev;
+ list_add(&np->list, &dev_list);
+
+ exit:
+ if ( (err != 0) && (dev != NULL ) )
+ free_netdev(dev); /* alloc_etherdev() memory must be freed with free_netdev() */
+ else if ( val != NULL )
+ *val = dev;
+ return err;
+}
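/*
 * Illustrative sketch of the free-chain idiom used for {tx,rx}_skbs above:
 * each unused ring slot stores the index of the next free slot, cast to a
 * pointer, so grabbing and releasing an ID are O(1) with no extra
 * bookkeeping. Stand-alone example; RING_SIZE and the helper names are
 * hypothetical, not part of the patch.
 */
#include <stddef.h>

#define RING_SIZE 8

static void *slots[RING_SIZE + 1]; /* slot 0 unused; chain ends past RING_SIZE */
static unsigned int free_head;

static void freelist_init(void)
{
    unsigned int i;
    for ( i = 0; i <= RING_SIZE; i++ )
        slots[i] = (void *)(size_t)(i + 1); /* each free entry -> next free index */
    free_head = 1;
}

static unsigned int freelist_get(void) /* caller then stores its data in slots[id] */
{
    unsigned int id = free_head;
    free_head = (unsigned int)(size_t)slots[id];
    return id;
}

static void freelist_put(unsigned int id)
{
    slots[id] = (void *)(size_t)free_head;
    free_head = id;
}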
+
+/* Get the target interface for a status message.
+ * Creates the interface if the message warrants it.
+ * The returned interface may be NULL even when there is no error.
+ *
+ * @param status status message
+ * @param np return parameter for interface state
+ * @return 0 on success, error code otherwise
+ */
+static int
+target_vif(
+ netif_fe_interface_status_t *status, struct net_private **np)
+{
+ int err = 0;
+ struct net_device *dev;
+
+ DPRINTK("> handle=%d\n", status->handle);
+ if ( status->handle < 0 )
+ {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if ( (dev = find_dev_by_handle(status->handle)) != NULL )
+ goto exit;
+
+ if ( status->status == NETIF_INTERFACE_STATUS_CLOSED )
+ goto exit;
+ if ( status->status == NETIF_INTERFACE_STATUS_CHANGED )
+ goto exit;
+
+ /* It's a new interface in a good state - create it. */
+ DPRINTK("> create device...\n");
+ if ( (err = create_netdev(status->handle, &dev)) != 0 )
+ goto exit;
+
+ netctrl.interface_n++;
+
+ exit:
+ if ( np != NULL )
+ *np = ((dev && !err) ? dev->priv : NULL);
+ DPRINTK("< err=%d\n", err);
+ return err;
+}
+
+/* Handle an interface status message. */
+static void netif_interface_status(netif_fe_interface_status_t *status)
+{
+ int err = 0;
+ struct net_private *np = NULL;
+
+ DPRINTK(">\n");
+ DPRINTK("> status=%s handle=%d\n",
+ status_name[status->status], status->handle);
+
+ if ( (err = target_vif(status, &np)) != 0 )
+ {
+ WPRINTK("Invalid netif: handle=%u\n", status->handle);
+ return;
+ }
+
+ if ( np == NULL )
+ {
+ DPRINTK("> no vif\n");
+ return;
+ }
+
+ DPRINTK(">\n"); vif_show(np);
+
+ switch ( status->status )
+ {
+ case NETIF_INTERFACE_STATUS_CLOSED:
+ switch ( np->backend_state )
+ {
+ case BEST_CLOSED:
+ case BEST_DISCONNECTED:
+ case BEST_CONNECTED:
+ vif_close(np);
+ break;
+ }
+ break;
+
+ case NETIF_INTERFACE_STATUS_DISCONNECTED:
+ switch ( np->backend_state )
+ {
+ case BEST_CLOSED:
+ vif_disconnect(np);
+ break;
+ case BEST_DISCONNECTED:
+ case BEST_CONNECTED:
+ vif_reset(np);
+ break;
+ }
+ break;
+
+ case NETIF_INTERFACE_STATUS_CONNECTED:
+ switch ( np->backend_state )
+ {
+ case BEST_CLOSED:
+ WPRINTK("Unexpected netif status %s in state %s\n",
+ status_name[status->status],
+ be_state_name[np->backend_state]);
+ vif_disconnect(np);
+ vif_connect(np, status);
+ break;
+ case BEST_DISCONNECTED:
+ vif_connect(np, status);
+ break;
+ }
+ break;
+
+ case NETIF_INTERFACE_STATUS_CHANGED:
+ /*
+ * The domain controller is notifying us that a device has been
+ * added or removed.
+ */
+ break;
+
+ default:
+ WPRINTK("Invalid netif status code %d\n", status->status);
+ break;
+ }
+ vif_show(np);
+ DPRINTK("<\n");
+}
+
+/*
+ * Handle a driver status notification from the domain controller.
+ */
+static void netif_driver_status(netif_fe_driver_status_t *status)
+{
+ DPRINTK("> status=%d\n", status->status);
+ netctrl.up = status->status;
+ /* netctrl.interface_n = status->max_handle; */
+ /* netctrl.connected_n = 0; */
+ netctrl_connected_count();
+}
+
+/* Receive handler for control messages. */
+static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+{
+ switch ( msg->subtype )
+ {
+ case CMSG_NETIF_FE_INTERFACE_STATUS:
+ if ( msg->length != sizeof(netif_fe_interface_status_t) )
+ goto error;
+ netif_interface_status((netif_fe_interface_status_t *)
+ &msg->msg[0]);
+ break;
+
+ case CMSG_NETIF_FE_DRIVER_STATUS:
+ if ( msg->length != sizeof(netif_fe_driver_status_t) )
+ goto error;
+ netif_driver_status((netif_fe_driver_status_t *)
+ &msg->msg[0]);
+ break;
+
+ error:
+ default:
+ msg->length = 0;
+ break;
+ }
+
+ ctrl_if_send_response(msg);
+}
+
+
+#if 1
+/* Wait for all interfaces to be connected.
+ *
+ * This works OK, but we'd like to use the probing mode (see below).
+ */
+static int probe_interfaces(void)
+{
+ int err = 0, conn = 0;
+ int wait_i, wait_n = 100;
+
+ DPRINTK(">\n");
+
+ for ( wait_i = 0; wait_i < wait_n; wait_i++)
+ {
+ DPRINTK("> wait_i=%d\n", wait_i);
+ conn = netctrl_connected();
+ if ( conn ) break;
+ DPRINTK("> schedule_timeout...\n");
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(10);
+ }
+
+ DPRINTK("> wait finished...\n");
+ if ( conn <= 0 )
+ {
+ err = netctrl_err(-ENETDOWN);
+ WPRINTK("Failed to connect all virtual interfaces: err=%d\n", err);
+ }
+
+ DPRINTK("< err=%d\n", err);
+
+ return err;
+}
+#else
+/* Probe for interfaces until no more are found.
+ *
+ * This is the mode we'd like to use, but at the moment it panics the kernel.
+ */
+static int probe_interfaces(void)
+{
+ int err = 0;
+ int wait_i, wait_n = 100;
+ ctrl_msg_t cmsg = {
+ .type = CMSG_NETIF_FE,
+ .subtype = CMSG_NETIF_FE_INTERFACE_STATUS,
+ .length = sizeof(netif_fe_interface_status_t),
+ };
+ netif_fe_interface_status_t msg = {};
+ ctrl_msg_t rmsg = {};
+ netif_fe_interface_status_t *reply = (void*)rmsg.msg;
+ int state = TASK_UNINTERRUPTIBLE;
+ u32 query = -1;
+
+ DPRINTK(">\n");
+
+ netctrl.interface_n = 0;
+ for ( wait_i = 0; wait_i < wait_n; wait_i++ )
+ {
+ DPRINTK("> wait_i=%d query=%d\n", wait_i, query);
+ msg.handle = query;
+ memcpy(cmsg.msg, &msg, sizeof(msg));
+ DPRINTK("> set_current_state...\n");
+ set_current_state(state);
+ DPRINTK("> rmsg=%p msg=%p, reply=%p\n", &rmsg, rmsg.msg, reply);
+ DPRINTK("> sending...\n");
+ err = ctrl_if_send_message_and_get_response(&cmsg, &rmsg, state);
+ DPRINTK("> err=%d\n", err);
+ if ( err ) goto exit;
+ DPRINTK("> rmsg=%p msg=%p, reply=%p\n", &rmsg, rmsg.msg, reply);
+ if ( (int)reply->handle < 0 )
+ {
+ /* No more interfaces. */
+ break;
+ }
+ query = -reply->handle - 2;
+ DPRINTK(">netif_interface_status ...\n");
+ netif_interface_status(reply);
+ }
+
+ exit:
+ if ( err )
+ {
+ err = netctrl_err(-ENETDOWN);
+ WPRINTK("Connecting virtual network interfaces failed: err=%d\n", err);
+ }
+
+ DPRINTK("< err=%d\n", err);
+ return err;
+}
+
+#endif
+
+static int __init netif_init(void)
+{
+ int err = 0;
+
+ if ( (xen_start_info.flags & SIF_INITDOMAIN) ||
+ (xen_start_info.flags & SIF_NET_BE_DOMAIN) )
+ return 0;
+
+ IPRINTK("Initialising virtual ethernet driver.\n");
+ INIT_LIST_HEAD(&dev_list);
+ netctrl_init();
+ (void)ctrl_if_register_receiver(CMSG_NETIF_FE, netif_ctrlif_rx,
+ CALLBACK_IN_BLOCKING_CONTEXT);
+ send_driver_status(1);
+ err = probe_interfaces();
+ if ( err )
+ ctrl_if_unregister_receiver(CMSG_NETIF_FE, netif_ctrlif_rx);
+
+ DPRINTK("< err=%d\n", err);
+ return err;
+}
+
+static void vif_suspend(struct net_private *np)
+{
+ /* Prevent tx/rx activity while the connection is being rebuilt. */
+ DPRINTK(">\n");
+ free_irq(np->irq, np->dev);
+ unbind_evtchn_from_irq(np->evtchn);
+ DPRINTK("<\n");
+}
+
+static void vif_resume(struct net_private *np)
+{
+ /* Reconnect regardless of whether the IFF_UP flag is set; keep the
+ * interface quiescent until the backend is connected again. */
+ DPRINTK(">\n");
+ np->backend_state = BEST_DISCONNECTED;
+ memset(np->tx, 0, PAGE_SIZE);
+ memset(np->rx, 0, PAGE_SIZE);
+
+ send_interface_connect(np);
+ DPRINTK("<\n");
+}
+
+void netif_suspend(void)
+{
+#if 1 /* XXX THIS IS TEMPORARY */
+ struct list_head *ent;
+ struct net_private *np;
+
+ DPRINTK(">\n");
+ list_for_each ( ent, &dev_list )
+ {
+ np = list_entry(ent, struct net_private, list);
+ vif_suspend(np);
+ }
+ DPRINTK("<\n");
+#endif
+}
+
+void netif_resume(void)
+{
+#if 1
+ /* XXX THIS IS TEMPORARY */
+ struct list_head *ent;
+ struct net_private *np;
+
+ DPRINTK(">\n");
+ list_for_each ( ent, &dev_list )
+ {
+ np = list_entry(ent, struct net_private, list);
+ vif_resume(np);
+ }
+ DPRINTK("<\n");
+#endif
+}
+
+
+__initcall(netif_init);
--- /dev/null
- #include <linux/module.h>
+/******************************************************************************
+ * privcmd.c
+ *
+ * Interface to privileged domain-0 commands.
+ *
+ * Copyright (c) 2002-2004, K A Fraser, B Dragovic
+ */
+
+#include <linux/config.h>
- {
- privcmd_intf->owner = THIS_MODULE;
- privcmd_intf->nlink = 1;
- privcmd_intf->proc_fops = &privcmd_file_ops;
- }
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/swap.h>
+#include <linux/smp_lock.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/seq_file.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/tlb.h>
+#include <asm-xen/linux-public/privcmd.h>
+#include <asm-xen/xen-public/dom0_ops.h>
+#include <asm-xen/xen_proc.h>
+
+static struct proc_dir_entry *privcmd_intf;
+
+static int privcmd_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long data)
+{
+ int ret = -ENOSYS;
+
+ switch ( cmd )
+ {
+ case IOCTL_PRIVCMD_HYPERCALL:
+ {
+ privcmd_hypercall_t hypercall;
+
+ if ( copy_from_user(&hypercall, (void *)data, sizeof(hypercall)) )
+ return -EFAULT;
+
+ __asm__ __volatile__ (
+ "pushl %%ebx; pushl %%ecx; pushl %%edx; pushl %%esi; pushl %%edi; "
+ "movl 4(%%eax),%%ebx ;"
+ "movl 8(%%eax),%%ecx ;"
+ "movl 12(%%eax),%%edx ;"
+ "movl 16(%%eax),%%esi ;"
+ "movl 20(%%eax),%%edi ;"
+ "movl (%%eax),%%eax ;"
+ TRAP_INSTR "; "
+ "popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
+ : "=a" (ret) : "0" (&hypercall) : "memory" );
+
+ }
+ break;
+
+ case IOCTL_PRIVCMD_INITDOMAIN_EVTCHN:
+ {
+ extern int initdom_ctrlif_domcontroller_port;
+ ret = initdom_ctrlif_domcontroller_port;
+ }
+ break;
+
+#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
+ case IOCTL_PRIVCMD_MMAP:
+ {
+#define PRIVCMD_MMAP_SZ 32
+ privcmd_mmap_t mmapcmd;
+ privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p;
+ int i, rc;
+
+ if ( copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd)) )
+ return -EFAULT;
+
+ p = mmapcmd.entry;
+
+ for (i=0; i<mmapcmd.num; i+=PRIVCMD_MMAP_SZ, p+=PRIVCMD_MMAP_SZ)
+ {
+ int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
+ PRIVCMD_MMAP_SZ:(mmapcmd.num-i);
+ if ( copy_from_user(&msg, p, n*sizeof(privcmd_mmap_entry_t)) )
+ return -EFAULT;
+
+ for ( j = 0; j < n; j++ )
+ {
+ struct vm_area_struct *vma =
+ find_vma( current->mm, msg[j].va );
+
+ if ( !vma )
+ return -EINVAL;
+
+ if ( msg[j].va > PAGE_OFFSET )
+ return -EINVAL;
+
+ if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end )
+ return -EINVAL;
+
+ if ( (rc = direct_remap_area_pages(vma->vm_mm,
+ msg[j].va&PAGE_MASK,
+ msg[j].mfn<<PAGE_SHIFT,
+ msg[j].npages<<PAGE_SHIFT,
+ vma->vm_page_prot,
+ mmapcmd.dom)) < 0 )
+ return rc;
+ }
+ }
+ ret = 0;
+ }
+ break;
+
+ case IOCTL_PRIVCMD_MMAPBATCH:
+ {
+#define MAX_DIRECTMAP_MMU_QUEUE 130
+ mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
+ privcmd_mmapbatch_t m;
+ struct vm_area_struct *vma = NULL;
+ unsigned long *p, addr;
+ unsigned long mfn;
+ int i;
+
+ if ( copy_from_user(&m, (void *)data, sizeof(m)) )
+ { ret = -EFAULT; goto batch_err; }
+
+ vma = find_vma( current->mm, m.addr );
+
+ if ( !vma )
+ { ret = -EINVAL; goto batch_err; }
+
+ if ( m.addr > PAGE_OFFSET )
+ { ret = -EFAULT; goto batch_err; }
+
+ if ( (m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end )
+ { ret = -EFAULT; goto batch_err; }
+
+ u[0].ptr = MMU_EXTENDED_COMMAND;
+ u[0].val = MMUEXT_SET_FOREIGNDOM;
+ u[0].val |= (unsigned long)m.dom << 16;
+ v = w = &u[1];
+
+ p = m.arr;
+ addr = m.addr;
+ for ( i = 0; i < m.num; i++, addr += PAGE_SIZE, p++ )
+ {
+ if ( get_user(mfn, p) )
+ return -EFAULT;
+
+ v->val = (mfn << PAGE_SHIFT) | pgprot_val(vma->vm_page_prot);
+
+ __direct_remap_area_pages(vma->vm_mm,
+ addr,
+ PAGE_SIZE,
+ v);
+
+ if ( unlikely(HYPERVISOR_mmu_update(u, v - u + 1, NULL) < 0) )
+ put_user( 0xF0000000 | mfn, p );
+
+ v = w;
+ }
+ ret = 0;
+ break;
+
+ batch_err:
+ printk("batch_err ret=%d vma=%p addr=%lx num=%d arr=%p %lx-%lx\n",
+ ret, vma, m.addr, m.num, m.arr, vma->vm_start, vma->vm_end);
+ break;
+ }
+ break;
+#endif
+
+ case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN:
+ {
+ unsigned long m2p_start_mfn =
+ HYPERVISOR_shared_info->arch.mfn_to_pfn_start;
+
+ if( put_user( m2p_start_mfn, (unsigned long *) data ) )
+ ret = -EFAULT;
+ else
+ ret = 0;
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
+{
+ /* DONTCOPY is essential for Xen as copy_page_range is broken. */
+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
+
+ return 0;
+}
+
+static struct file_operations privcmd_file_ops = {
+ ioctl: privcmd_ioctl,
+ mmap: privcmd_mmap
+};
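/*
 * Illustrative user-space sketch of driving privcmd from dom0: open the
 * proc node created above and issue a hypercall ioctl. The struct and
 * ioctl number come from privcmd.h; __HYPERVISOR_xen_version is used as a
 * harmless example hypercall, and error handling is abbreviated.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm-xen/linux-public/privcmd.h>
#include <asm-xen/xen-public/xen.h>

int main(void)
{
    privcmd_hypercall_t call = { .op = __HYPERVISOR_xen_version };
    int fd = open("/proc/xen/privcmd", O_RDWR);

    if ( fd < 0 )
        return 1;
    if ( ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call) < 0 )
        perror("IOCTL_PRIVCMD_HYPERCALL");
    close(fd);
    return 0;
}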
+
+
+static int __init privcmd_init(void)
+{
+ if ( !(xen_start_info.flags & SIF_PRIVILEGED) )
+ return 0;
+
+ privcmd_intf = create_xen_proc_entry("privcmd", 0400);
+ if ( privcmd_intf != NULL )
-
- static void __exit privcmd_cleanup(void)
- {
- if ( privcmd_intf == NULL ) return;
- remove_xen_proc_entry("privcmd");
- privcmd_intf = NULL;
- }
-
-
- module_init(privcmd_init);
- module_exit(privcmd_cleanup);
++ privcmd_intf->proc_fops = &privcmd_file_ops;
+
+ return 0;
+}
+
++__initcall(privcmd_init);
--- /dev/null
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/compiler.h>
+
+/*
+ * This file contains the definitions for the x86 IO instructions
+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
+ * versions of the single-IO instructions (inb_p/inw_p/..).
+ *
+ * This file is not meant to be obfuscating: it's just complicated
+ * to (a) handle it all in a way that makes gcc able to optimize it
+ * as well as possible and (b) avoid writing the same thing over and
+ * over again with slight variations and possibly making a mistake
+ * somewhere.
+ */
+
+/*
+ * Thanks to James van Artsdalen for a better timing-fix than
+ * the two short jumps: using outb's to a nonexistent port seems
+ * to guarantee better timings even on fast machines.
+ *
+ * On the other hand, I'd like to be sure of a non-existent port:
+ * I feel a bit unsafe about using 0x80 (should be safe, though)
+ *
+ * Linus
+ */
+
+ /*
+ * Bit simplified and optimized by Jan Hubicka
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
+ *
+ * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
+ * isa_read[wl] and isa_write[wl] fixed
+ * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ */
+
+#define IO_SPACE_LIMIT 0xffff
+
+#define XQUAD_PORTIO_BASE 0xfe400000
+#define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */
+
+#ifdef __KERNEL__
+
+#include <asm-generic/iomap.h>
+
+#include <linux/vmalloc.h>
+#include <asm/fixmap.h>
+
+/**
+ * virt_to_phys - map virtual addresses to physical
+ * @address: address to remap
+ *
+ * The returned physical address is the physical (CPU) mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses directly mapped or allocated via kmalloc.
+ *
+ * This function does not give bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function.
+ */
+
+static inline unsigned long virt_to_phys(volatile void * address)
+{
+ return __pa(address);
+}
+
+/**
+ * phys_to_virt - map physical address to virtual
+ * @address: address to remap
+ *
+ * The returned virtual address is a current CPU mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses that have a kernel mapping.
+ *
+ * This function does not handle bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function.
+ */
+
+static inline void * phys_to_virt(unsigned long address)
+{
+ return __va(address);
+}
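/*
 * Illustrative sketch (assumes <linux/slab.h>): on directly mapped lowmem
 * the two helpers above are exact inverses, since both reduce to
 * __pa()/__va() arithmetic.
 */
static void virt_phys_roundtrip_sketch(void)
{
    void *buf = kmalloc(64, GFP_KERNEL); /* lowmem, directly mapped */
    if ( buf != NULL )
    {
        unsigned long phys = virt_to_phys(buf);
        BUG_ON(phys_to_virt(phys) != buf); /* round trip is the identity */
        kfree(buf);
    }
}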
+
+/*
+ * Change "struct page" to physical address.
+ */
+#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+#define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page)))
+
+#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + \
+ (unsigned long) bio_offset((bio)))
+#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \
+ (unsigned long) (bv)->bv_offset)
+
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
+ ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
+ bvec_to_pseudophys((vec2))))
+
+extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
+
+/**
+ * ioremap - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ */
+
+static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
+{
+ return __ioremap(offset, size, 0);
+}
+
+extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size);
+extern void iounmap(volatile void __iomem *addr);
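/*
 * Illustrative sketch of the usual ioremap()/access/iounmap() lifetime for
 * device registers. The bus address, size and register layout here are
 * hypothetical, and <linux/errno.h> is assumed for the error codes.
 */
static int mmio_probe_sketch(void)
{
    void __iomem *regs = ioremap(0xfebf0000UL, 0x1000);
    unsigned int id;

    if ( regs == NULL )
        return -ENOMEM;
    id = readl(regs);        /* hypothetical ID register at offset 0x0 */
    writel(0x1, regs + 0x4); /* hypothetical enable bit at offset 0x4  */
    iounmap(regs);
    return (id != 0) ? 0 : -ENODEV;
}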
+
+/*
+ * bt_ioremap() and bt_iounmap() are for temporary early boot-time
+ * mappings, before the real ioremap() is functional.
+ * A boot-time mapping is currently limited to at most 16 pages.
+ */
+extern void *bt_ioremap(unsigned long offset, unsigned long size);
+extern void bt_iounmap(void *addr, unsigned long size);
+
+/*
+ * ISA I/O bus memory addresses are 1:1 with the physical address.
+ */
+#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
+#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
+#else
+#define isa_bus_to_virt(_x) isa_bus_to_virt_needs_PRIVILEGED_BUILD
+#endif
+
+/*
+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
+ * are forbidden in portable PCI drivers.
+ *
+ * Allow them on x86 for legacy drivers, though.
+ */
+#define virt_to_bus(_x) phys_to_machine(__pa(_x))
+#define bus_to_virt(_x) __va(machine_to_phys(_x))
+
+/*
+ * readX/writeX() are used to access memory mapped devices. On some
+ * architectures the memory mapped IO stuff needs to be accessed
+ * differently. On the x86 architecture, we just read/write the
+ * memory location directly.
+ */
+
+static inline unsigned char readb(const volatile void __iomem *addr)
+{
+ return *(volatile unsigned char __force *) addr;
+}
+static inline unsigned short readw(const volatile void __iomem *addr)
+{
+ return *(volatile unsigned short __force *) addr;
+}
+static inline unsigned int readl(const volatile void __iomem *addr)
+{
+ return *(volatile unsigned int __force *) addr;
+}
+#define readb_relaxed(addr) readb(addr)
+#define readw_relaxed(addr) readw(addr)
+#define readl_relaxed(addr) readl(addr)
+#define __raw_readb readb
+#define __raw_readw readw
+#define __raw_readl readl
+
+static inline void writeb(unsigned char b, volatile void __iomem *addr)
+{
+ *(volatile unsigned char __force *) addr = b;
+}
+static inline void writew(unsigned short b, volatile void __iomem *addr)
+{
+ *(volatile unsigned short __force *) addr = b;
+}
+static inline void writel(unsigned int b, volatile void __iomem *addr)
+{
+ *(volatile unsigned int __force *) addr = b;
+}
+#define __raw_writeb writeb
+#define __raw_writew writew
+#define __raw_writel writel
+
+#define mmiowb()
+
+static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
+{
+ memset((void __force *) addr, val, count);
+}
+static inline void memcpy_fromio(void *dst, volatile void __iomem *src, int count)
+{
+ __memcpy(dst, (void __force *) src, count);
+}
+static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
+{
+ __memcpy((void __force *) dst, src, count);
+}
+
+/*
+ * ISA space is 'always mapped' on a typical x86 system, no need to
+ * explicitly ioremap() it. The fact that the ISA IO space is mapped
+ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
+ * are physical addresses. The following constant pointer can be
+ * used as the IO-area pointer (it can be iounmapped as well, so the
+ * analogy with PCI is quite large):
+ */
+#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
+
+#define isa_readb(a) readb(__ISA_IO_base + (a))
+#define isa_readw(a) readw(__ISA_IO_base + (a))
+#define isa_readl(a) readl(__ISA_IO_base + (a))
+#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
+#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
+#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
+#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c))
+#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
+#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c))
+
+
+/*
+ * Again, i386 does not require any mem-IO-specific functions.
+ */
+
+#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(b),(c),(d))
+#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void __force *)(__ISA_IO_base + (b)),(c),(d))
+
+/**
+ * check_signature - find BIOS signatures
+ * @io_addr: mmio address to check
+ * @signature: signature block
+ * @length: length of signature
+ *
+ * Perform a signature comparison with the mmio address io_addr. This
+ * address should have been obtained by ioremap.
+ * Returns 1 on a match.
+ */
+
+static inline int check_signature(volatile void __iomem * io_addr,
+ const unsigned char *signature, int length)
+{
+ int retval = 0;
+ do {
+ if (readb(io_addr) != *signature)
+ goto out;
+ io_addr++;
+ signature++;
+ length--;
+ } while (length);
+ retval = 1;
+out:
+ return retval;
+}
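/*
 * Illustrative sketch: check_signature() is normally run over a window
 * obtained with ioremap(), e.g. scanning the legacy option-ROM area for a
 * hypothetical "$XYZ" marker.
 */
static int find_rom_sig_sketch(void)
{
    void __iomem *rom = ioremap(0xc0000, 0x1000);
    int found = 0;

    if ( rom != NULL )
    {
        found = check_signature(rom, (const unsigned char *)"$XYZ", 4);
        iounmap(rom);
    }
    return found; /* 1 if the signature matched */
}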
+
+/**
+ * isa_check_signature - find BIOS signatures
+ * @io_addr: mmio address to check
+ * @signature: signature block
+ * @length: length of signature
+ *
+ * Perform a signature comparison with the ISA mmio address io_addr.
+ * Returns 1 on a match.
+ *
+ * This function is deprecated. New drivers should use ioremap and
+ * check_signature.
+ */
+
+
+static inline int isa_check_signature(unsigned long io_addr,
+ const unsigned char *signature, int length)
+{
+ int retval = 0;
+ do {
+ if (isa_readb(io_addr) != *signature)
+ goto out;
+ io_addr++;
+ signature++;
+ length--;
+ } while (length);
+ retval = 1;
+out:
+ return retval;
+}
+
+/*
+ * Cache management
+ *
+ * This is needed for two cases:
+ * 1. Out of order aware processors
+ * 2. Accidentally out of order processors (PPro errata #51)
+ */
+
+#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+
+static inline void flush_write_buffers(void)
+{
+ __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
+}
+
+#define dma_cache_inv(_start,_size) flush_write_buffers()
+#define dma_cache_wback(_start,_size) flush_write_buffers()
+#define dma_cache_wback_inv(_start,_size) flush_write_buffers()
+
+#else
+
+/* Nothing to do */
+
+#define dma_cache_inv(_start,_size) do { } while (0)
+#define dma_cache_wback(_start,_size) do { } while (0)
+#define dma_cache_wback_inv(_start,_size) do { } while (0)
+#define flush_write_buffers()
+
+#endif
+
+#endif /* __KERNEL__ */
+
+#ifdef SLOW_IO_BY_JUMPING
+#define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:"
+#elif defined(__UNSAFE_IO__)
+#define __SLOW_DOWN_IO "outb %%al,$0x80;"
+#else
+#define __SLOW_DOWN_IO "\n1: outb %%al,$0x80\n" \
+ "2:\n" \
+ ".section __ex_table,\"a\"\n\t" \
+ ".align 4\n\t" \
+ ".long 1b,2b\n" \
+ ".previous"
+#endif
+
+static inline void slow_down_io(void) {
+ __asm__ __volatile__(
+ __SLOW_DOWN_IO
+#ifdef REALLY_SLOW_IO
+ __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
+#endif
+ : : );
+}
+
+#ifdef CONFIG_X86_NUMAQ
+extern void *xquad_portio; /* Where the IO area was mapped */
+#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
+#define __BUILDIO(bwl,bw,type) \
+static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \
+ if (xquad_portio) \
+ write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \
+ else \
+ out##bwl##_local(value, port); \
+} \
+static inline void out##bwl(unsigned type value, int port) { \
+ out##bwl##_quad(value, port, 0); \
+} \
+static inline unsigned type in##bwl##_quad(int port, int quad) { \
+ if (xquad_portio) \
+ return read##bwl(XQUAD_PORT_ADDR(port, quad)); \
+ else \
+ return in##bwl##_local(port); \
+} \
+static inline unsigned type in##bwl(int port) { \
+ return in##bwl##_quad(port, 0); \
+}
+#else
+#define __BUILDIO(bwl,bw,type) \
+static inline void out##bwl(unsigned type value, int port) { \
+ out##bwl##_local(value, port); \
+} \
+static inline unsigned type in##bwl(int port) { \
+ return in##bwl##_local(port); \
+}
+#endif
+
+
+#if __UNSAFE_IO__
+#define ____BUILDIO(bwl,bw,type) \
+static inline void out##bwl##_local(unsigned type value, int port) { \
+ __asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \
+} \
+static inline unsigned type in##bwl##_local(int port) { \
+ unsigned type value; \
+ __asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \
+ return value; \
+}
+#else
+#define ____BUILDIO(bwl,bw,type) \
+static inline void out##bwl##_local(unsigned type value, int port) { \
+ __asm__ __volatile__("1: out" #bwl " %" #bw "0, %w1\n" \
+ "2:\n" \
+ ".section __ex_table,\"a\"\n\t" \
+ ".align 4\n\t" \
+ ".long 1b,2b\n" \
+ ".previous" : : "a"(value), "Nd"(port)); \
+} \
+static inline unsigned type in##bwl##_local(int port) { \
+ unsigned type value; \
+ __asm__ __volatile__("1:in" #bwl " %w1, %" #bw "0\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov" #bwl " $~0,%" #bw "0\n\t" \
+ "jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n\t" \
+ ".align 4\n\t" \
+ ".long 1b,3b\n" \
+ ".previous" : "=a"(value) : "Nd"(port)); \
+ return value; \
+}
+#endif
+
+#define BUILDIO(bwl,bw,type) \
+____BUILDIO(bwl,bw,type) \
+static inline void out##bwl##_local_p(unsigned type value, int port) { \
+ out##bwl##_local(value, port); \
+ slow_down_io(); \
+} \
+static inline unsigned type in##bwl##_local_p(int port) { \
+ unsigned type value = in##bwl##_local(port); \
+ slow_down_io(); \
+ return value; \
+} \
+__BUILDIO(bwl,bw,type) \
+static inline void out##bwl##_p(unsigned type value, int port) { \
+ out##bwl(value, port); \
+ slow_down_io(); \
+} \
+static inline unsigned type in##bwl##_p(int port) { \
+ unsigned type value = in##bwl(port); \
+ slow_down_io(); \
+ return value; \
+} \
+static inline void outs##bwl(int port, const void *addr, unsigned long count) { \
+ __asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \
+} \
+static inline void ins##bwl(int port, void *addr, unsigned long count) { \
+ __asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \
+}
+
+BUILDIO(b,b,char)
+BUILDIO(w,w,short)
+BUILDIO(l,,int)
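/*
 * After the three BUILDIO() instantiations above, the full port-IO family
 * exists: in{b,w,l}/out{b,w,l}, their _p "pausing" variants, and the
 * string forms ins{b,w,l}/outs{b,w,l}. Illustrative sketch using the
 * classic CMOS/RTC index/data port pair at 0x70/0x71:
 */
static inline unsigned char cmos_read_seconds_sketch(void)
{
    outb_p(0x00, 0x70); /* select CMOS register 0 (RTC seconds) */
    return inb_p(0x71); /* read it back through the data port   */
}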
+
++/* We will be supplying our own /dev/mem implementation */
++#define ARCH_HAS_DEV_MEM
++
+#endif
--- /dev/null
- if (likely(vma->vm_mm == current->mm)) { \
- xen_flush_page_update_queue(); \
- HYPERVISOR_update_va_mapping(address>>PAGE_SHIFT, \
- entry, UVMF_INVLPG); \
- } else { \
- xen_l1_entry_update((__ptep), (__entry).pte_low); \
- flush_tlb_page(__vma, __address); \
- } \
+#ifndef _I386_PGTABLE_H
+#define _I386_PGTABLE_H
+
+#include <linux/config.h>
+#include <asm-xen/hypervisor.h>
+
+/*
+ * The Linux memory management assumes a three-level page table setup. On
+ * the i386, we use that, but "fold" the mid level into the top-level page
+ * table, so that we physically have the same two-level page table as the
+ * i386 mmu expects.
+ *
+ * This file contains the functions and defines necessary to modify and use
+ * the i386 page table tree.
+ */
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#include <asm/fixmap.h>
+#include <linux/threads.h>
+
+#ifndef _I386_BITOPS_H
+#include <asm/bitops.h>
+#endif
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+extern unsigned long empty_zero_page[1024];
+extern pgd_t swapper_pg_dir[1024];
+extern kmem_cache_t *pgd_cache;
+extern kmem_cache_t *pmd_cache;
+extern kmem_cache_t *pte_cache;
+extern spinlock_t pgd_lock;
+extern struct page *pgd_list;
+
+void pte_ctor(void *, kmem_cache_t *, unsigned long);
+void pte_dtor(void *, kmem_cache_t *, unsigned long);
+void pmd_ctor(void *, kmem_cache_t *, unsigned long);
+void pgd_ctor(void *, kmem_cache_t *, unsigned long);
+void pgd_dtor(void *, kmem_cache_t *, unsigned long);
+void pgtable_cache_init(void);
+void paging_init(void);
+
+/*
+ * The Linux x86 paging architecture is 'compile-time dual-mode', it
+ * implements both the traditional 2-level x86 page tables and the
+ * newer 3-level PAE-mode page tables.
+ */
+#ifdef CONFIG_X86_PAE
+# include <asm/pgtable-3level-defs.h>
+#else
+# include <asm/pgtable-2level-defs.h>
+#endif
+
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
+#define FIRST_USER_PGD_NR 0
+
+#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
+#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
+
+#define TWOLEVEL_PGDIR_SHIFT 22
+#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
+#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
+
+/* Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be an 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts. That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ */
+#define VMALLOC_OFFSET (8*1024*1024)
+#define VMALLOC_START (((unsigned long) high_memory + vmalloc_earlyreserve + \
+ 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
+#ifdef CONFIG_HIGHMEM
+# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
+#else
+# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
+#endif
+
+extern void *high_memory;
+extern unsigned long vmalloc_earlyreserve;
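/*
 * Worked instance of the VMALLOC_START rounding above, assuming (purely
 * for illustration) high_memory == 0xf8000000 and vmalloc_earlyreserve == 0:
 *
 *   VMALLOC_START = (0xf8000000 + 0 + 2*0x00800000 - 1) & ~(0x00800000 - 1)
 *                 = 0xf8ffffff & 0xff800000
 *                 = 0xf8800000
 *
 * i.e. an 8MB-aligned start that leaves at least an 8MB guard hole above
 * the direct mapping.
 */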
+
+/*
+ * The 4MB page is guessing.. Detailed in the infamous "Chapter H"
+ * of the Pentium details, but assuming intel did the straightforward
+ * thing, this bit set in the page directory entry just means that
+ * the page directory entry points directly to a 4MB-aligned block of
+ * memory.
+ */
+#define _PAGE_BIT_PRESENT 0
+#define _PAGE_BIT_RW 1
+#define _PAGE_BIT_USER 2
+#define _PAGE_BIT_PWT 3
+#define _PAGE_BIT_PCD 4
+#define _PAGE_BIT_ACCESSED 5
+#define _PAGE_BIT_DIRTY 6
+#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */
+#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
+#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
+#define _PAGE_BIT_UNUSED2 10
+#define _PAGE_BIT_UNUSED3 11
+#define _PAGE_BIT_NX 63
+
+#define _PAGE_PRESENT 0x001
+#define _PAGE_RW 0x002
+#define _PAGE_USER 0x004
+#define _PAGE_PWT 0x008
+#define _PAGE_PCD 0x010
+#define _PAGE_ACCESSED 0x020
+#define _PAGE_DIRTY 0x040
+#define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */
+#define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */
+#define _PAGE_UNUSED1 0x200 /* available for programmer */
+#define _PAGE_UNUSED2 0x400
+#define _PAGE_UNUSED3 0x800
+
+#define _PAGE_FILE 0x040 /* set:pagecache unset:swap */
+#define _PAGE_PROTNONE 0x080 /* If not present */
+#ifdef CONFIG_X86_PAE
+#define _PAGE_NX (1ULL<<_PAGE_BIT_NX)
+#else
+#define _PAGE_NX 0
+#endif
+
+#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define PAGE_NONE \
+ __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED \
+ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
+
+#define PAGE_SHARED_EXEC \
+ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY_NOEXEC \
+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_COPY_EXEC \
+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY \
+ PAGE_COPY_NOEXEC
+#define PAGE_READONLY \
+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_READONLY_EXEC \
+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+
+#define _PAGE_KERNEL \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
+#define _PAGE_KERNEL_EXEC \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
+
+extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
+#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
+#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD)
+#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
+#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
+
+#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
+#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
+#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
+#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
+#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
+
+/*
+ * The i386 can't do page protection for execute, and considers that
+ * the same as read. Also, write permissions imply read permissions.
+ * This is the closest we can get..
+ */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY_EXEC
+#define __P101 PAGE_READONLY_EXEC
+#define __P110 PAGE_COPY_EXEC
+#define __P111 PAGE_COPY_EXEC
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY_EXEC
+#define __S101 PAGE_READONLY_EXEC
+#define __S110 PAGE_SHARED_EXEC
+#define __S111 PAGE_SHARED_EXEC
+
+/*
+ * Define this if things work differently on an i386 and an i486:
+ * it will (on an i486) warn about kernel memory accesses that are
+ * done without a 'verify_area(VERIFY_WRITE,..)'
+ */
+#undef TEST_VERIFY_AREA
+
+/* The boot page tables (all created as a single array) */
+extern unsigned long pg0[];
+
+#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
+#define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0)
+
+#define pmd_none(x) (!pmd_val(x))
+/* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t.
+ can temporarily clear it. */
+#define pmd_present(x) (pmd_val(x))
+/* pmd_clear below */
+#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
+
+
+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
+static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
+static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
+static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }
+
+/*
+ * The following only works if pte_present() is not true.
+ */
+static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; }
+
+static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
+static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
+static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; }
+static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
+static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
+
+#ifdef CONFIG_X86_PAE
+# include <asm/pgtable-3level.h>
+#else
+# include <asm/pgtable-2level.h>
+#endif
+
+static inline int ptep_test_and_clear_dirty(pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ int ret = pte_dirty(pte);
+ if (ret)
+ xen_l1_entry_update(ptep, pte_mkclean(pte).pte_low);
+ return ret;
+}
+
+static inline int ptep_test_and_clear_young(pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ int ret = pte_young(pte);
+ if (ret)
+ xen_l1_entry_update(ptep, pte_mkold(pte).pte_low);
+ return ret;
+}
+
+static inline void ptep_set_wrprotect(pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ if (pte_write(pte))
+ set_pte(ptep, pte_wrprotect(pte));
+}
+static inline void ptep_mkdirty(pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ if (!pte_dirty(pte))
+ xen_l1_entry_update(ptep, pte_mkdirty(pte).pte_low);
+}
+
+/*
+ * Macro to mark a page protection value as "uncacheable". On processors which do not support
+ * it, this is a no-op.
+ */
+#define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \
+ ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+#define mk_pte_huge(entry) ((entry).pte_low |= _PAGE_PRESENT | _PAGE_PSE)
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte.pte_low &= _PAGE_CHG_MASK;
+ pte.pte_low |= pgprot_val(newprot);
+#ifdef CONFIG_X86_PAE
+ /*
+ * Chop off the NX bit (if present), and add the NX portion of
+ * the newprot (if present):
+ */
+ pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
+ pte.pte_high |= (pgprot_val(newprot) >> 32) & \
+ (__supported_pte_mask >> 32);
+#endif
+ return pte;
+}
+
+#define page_pte(page) page_pte_prot(page, __pgprot(0))
+
+#define pmd_page_kernel(pmd) \
+((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
+#define pmd_clear(xp) do { \
+ set_pmd(xp, __pmd(0)); \
+ xen_flush_page_update_queue(); \
+} while (0)
+
+#ifndef CONFIG_DISCONTIGMEM
+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+#endif /* !CONFIG_DISCONTIGMEM */
+
+#define pmd_large(pmd) \
+ ((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
+
+/*
+ * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
+ *
+ * this macro returns the index of the entry in the pgd page which would
+ * control the given virtual address
+ */
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+
+/*
+ * pgd_offset() returns a (pgd_t *)
+ * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
+ */
+#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+
+/*
+ * a shortcut which implies the use of the kernel's pgd, instead
+ * of a process's
+ */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/*
+ * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
+ *
+ * this macro returns the index of the entry in the pmd page which would
+ * control the given virtual address
+ */
+#define pmd_index(address) \
+ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
+/*
+ * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
+ *
+ * this macro returns the index of the entry in the pte page which would
+ * control the given virtual address
+ */
+#define pte_index(address) \
+ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) \
+ ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
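/*
 * Illustrative stand-alone sketch of the 2-level (non-PAE) split performed
 * by pgd_index()/pte_index() above, where PGDIR_SHIFT == 22 and
 * PAGE_SHIFT == 12:
 */
#include <stdio.h>

int main(void)
{
    unsigned long va  = 0xc0123456UL;
    unsigned long pgd = (va >> 22) & 0x3ff; /* pgd_index(): top 10 bits    */
    unsigned long pte = (va >> 12) & 0x3ff; /* pte_index(): middle 10 bits */
    unsigned long off = va & 0xfff;         /* byte offset within the page */

    printf("pgd=%lu pte=%lu off=0x%lx\n", pgd, pte, off); /* 768 291 0x456 */
    return 0;
}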
+
+/*
+ * Helper function that returns the kernel pagetable entry controlling
+ * the virtual address 'address'. NULL means no pagetable entry present.
+ * NOTE: the return type is pte_t but if the pmd is PSE then we return it
+ * as a pte too.
+ */
+extern pte_t *lookup_address(unsigned long address);
+
+/*
+ * Make a given kernel text page executable/non-executable.
+ * Returns the previous executability setting of that page (which
+ * is used to restore the previous state). Used by the SMP bootup code.
+ * NOTE: this is an __init function for security reasons.
+ */
+#ifdef CONFIG_X86_PAE
+ extern int set_kernel_exec(unsigned long vaddr, int enable);
+#else
+ static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
+#endif
+
+#if defined(CONFIG_HIGHPTE)
+#define pte_offset_map(dir, address) \
+ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + \
+ pte_index(address))
+#define pte_offset_map_nested(dir, address) \
+ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + \
+ pte_index(address))
+#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
+#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+#else
+#define pte_offset_map(dir, address) \
+ ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
+#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
+#endif
+
+/*
+ * The i386 doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ *
+ * Also, we only update the dirty/accessed state if we set
+ * the dirty bit by hand in the kernel, since the hardware
+ * will do the accessed bit for us, and we don't want to
+ * race with other CPU's that might be updating the dirty
+ * bit at the same time.
+ */
+#define update_mmu_cache(vma,address,pte) do { } while (0)
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+ do { \
+ if (__dirty) { \
- static inline unsigned long arbitrary_virt_to_machine(void *va)
- {
- pgd_t *pgd = pgd_offset_k((unsigned long)va);
- pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
- pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
- unsigned long pa = (*(unsigned long *)pte) & PAGE_MASK;
- return pa | ((unsigned long)va & (PAGE_SIZE-1));
- }
++ if ( likely((__vma)->vm_mm == current->mm) ) { \
++ xen_flush_page_update_queue(); \
++ HYPERVISOR_update_va_mapping((__address)>>PAGE_SHIFT, (__entry), UVMF_INVLPG); \
++ } else { \
++ xen_l1_entry_update((__ptep), (__entry).pte_low); \
++ flush_tlb_page((__vma), (__address)); \
++ } \
+ } \
+ } while (0)
+
+#define __HAVE_ARCH_PTEP_ESTABLISH
+#define ptep_establish(__vma, __address, __ptep, __entry) \
+do { \
+ ptep_set_access_flags(__vma, __address, __ptep, __entry, 1); \
+} while (0)
+
+#define __HAVE_ARCH_PTEP_ESTABLISH_NEW
+#define ptep_establish_new(__vma, __address, __ptep, __entry) \
+do { \
+ if (likely((__vma)->vm_mm == current->mm)) { \
+ xen_flush_page_update_queue(); \
+ HYPERVISOR_update_va_mapping((__address)>>PAGE_SHIFT, \
+ __entry, 0); \
+ } else { \
+ xen_l1_entry_update((__ptep), (__entry).pte_low); \
+ } \
+} while (0)
+
+/* NOTE: make_page* callers must call flush_page_update_queue() */
+void make_lowmem_page_readonly(void *va);
+void make_lowmem_page_writable(void *va);
+void make_page_readonly(void *va);
+void make_page_writable(void *va);
+void make_pages_readonly(void *va, unsigned int nr);
+void make_pages_writable(void *va, unsigned int nr);
+
++#define arbitrary_virt_to_machine(__va) \
++({ \
++ pgd_t *__pgd = pgd_offset_k((unsigned long)(__va)); \
++ pmd_t *__pmd = pmd_offset(__pgd, (unsigned long)(__va)); \
++ pte_t *__pte = pte_offset_kernel(__pmd, (unsigned long)(__va)); \
++ unsigned long __pa = (*(unsigned long *)__pte) & PAGE_MASK; \
++ __pa | ((unsigned long)(__va) & (PAGE_SIZE-1)); \
++})
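/*
 * Illustrative sketch: because arbitrary_virt_to_machine() walks the page
 * tables, it also works for vmalloc()/ioremap() addresses, where linear
 * virt-to-machine arithmetic would be wrong. The caller and buffer here
 * are hypothetical.
 */
static unsigned long ring_mfn_sketch(void *ring) /* e.g. a vmalloc()ed page */
{
    return arbitrary_virt_to_machine(ring) >> PAGE_SHIFT;
}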
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef CONFIG_DISCONTIGMEM
+#define kern_addr_valid(addr) (1)
+#endif /* !CONFIG_DISCONTIGMEM */
+
++int direct_remap_area_pages(struct mm_struct *mm,
++ unsigned long address,
++ unsigned long machine_addr,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid);
++int __direct_remap_area_pages(struct mm_struct *mm,
++ unsigned long address,
++ unsigned long size,
++ mmu_update_t *v);
++
+#define io_remap_page_range(vma,from,phys,size,prot) \
+ direct_remap_area_pages(vma->vm_mm,from,phys,size,prot,DOMID_IO)
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTEP_MKDIRTY
+#define __HAVE_ARCH_PTE_SAME
+#include <asm-generic/pgtable.h>
+
+#endif /* _I386_PGTABLE_H */
--- /dev/null
- /* arch/xen/i386/mm/init.c */
- /* NOTE: caller must call flush_page_update_queue() */
- #define PROT_ON 1
- #define PROT_OFF 0
- void /* __init */ protect_page(pgd_t *dpgd, void *page, int mode);
- void /* __init */ protect_pagetable(pgd_t *dpgd, pgd_t *spgd, int mode);
-
+/******************************************************************************
+ * hypervisor.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __HYPERVISOR_H__
+#define __HYPERVISOR_H__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <asm-xen/xen-public/xen.h>
+#include <asm-xen/xen-public/dom0_ops.h>
+#include <asm-xen/xen-public/io/domain_controller.h>
+#include <asm/ptrace.h>
+#include <asm/page.h>
+
+/* arch/xen/i386/kernel/setup.c */
+union xen_start_info_union
+{
+ start_info_t xen_start_info;
+ char padding[512];
+};
+extern union xen_start_info_union xen_start_info_union;
+#define xen_start_info (xen_start_info_union.xen_start_info)
+
+/* arch/xen/kernel/process.c */
+void xen_cpu_idle (void);
+
+/* arch/xen/i386/kernel/hypervisor.c */
+void do_hypervisor_callback(struct pt_regs *regs);
+
- /* Deallocate a contiguous region of low memory. Return it to the allocator. */
- void deallocate_lowmem_region(unsigned long vstart, unsigned long pages);
+/* arch/xen/i386/kernel/head.S */
+void lgdt_finish(void);
+
+/* arch/xen/i386/mm/hypervisor.c */
+/*
+ * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should already
+ * be MACHINE addresses.
+ */
+
+extern unsigned int mmu_update_queue_idx;
+
+void queue_l1_entry_update(pte_t *ptr, unsigned long val);
+void queue_l2_entry_update(pmd_t *ptr, unsigned long val);
+void queue_pt_switch(unsigned long ptr);
+void queue_tlb_flush(void);
+void queue_invlpg(unsigned long ptr);
+void queue_pgd_pin(unsigned long ptr);
+void queue_pgd_unpin(unsigned long ptr);
+void queue_pte_pin(unsigned long ptr);
+void queue_pte_unpin(unsigned long ptr);
+void queue_set_ldt(unsigned long ptr, unsigned long bytes);
+void queue_machphys_update(unsigned long mfn, unsigned long pfn);
+void xen_l1_entry_update(pte_t *ptr, unsigned long val);
+void xen_l2_entry_update(pmd_t *ptr, unsigned long val);
+void xen_pt_switch(unsigned long ptr);
+void xen_tlb_flush(void);
+void xen_invlpg(unsigned long ptr);
+void xen_pgd_pin(unsigned long ptr);
+void xen_pgd_unpin(unsigned long ptr);
+void xen_pte_pin(unsigned long ptr);
+void xen_pte_unpin(unsigned long ptr);
+void xen_set_ldt(unsigned long ptr, unsigned long bytes);
+void xen_machphys_update(unsigned long mfn, unsigned long pfn);
+
+void _flush_page_update_queue(void);
+static inline int flush_page_update_queue(void)
+{
+ unsigned int idx = mmu_update_queue_idx;
+ if ( idx != 0 ) _flush_page_update_queue();
+ return idx;
+}
+#define xen_flush_page_update_queue() (_flush_page_update_queue())
+#define XEN_flush_page_update_queue() (_flush_page_update_queue())
+void MULTICALL_flush_page_update_queue(void);
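/*
 * Illustrative sketch of the queue-then-flush pattern: queue_*() calls
 * only record updates, and nothing reaches Xen until the queue is
 * flushed. The PTE pointers and values here are hypothetical.
 */
static void batched_pte_update_sketch(pte_t *ptep0, unsigned long val0,
                                      pte_t *ptep1, unsigned long val1)
{
    queue_l1_entry_update(ptep0, val0); /* queued, not yet applied */
    queue_l1_entry_update(ptep1, val1);
    xen_flush_page_update_queue();      /* one trap applies both   */
}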
+
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
+/* Allocate a contiguous empty region of low memory. Return virtual start. */
+unsigned long allocate_empty_lowmem_region(unsigned long pages);
+#endif
+
+/*
+ * Assembler stubs for hyper-calls.
+ */
+
+static inline int
+HYPERVISOR_set_trap_table(
+ trap_info_t *table)
+{
+ int ret;
+ unsigned long ignore;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ignore)
+ : "0" (__HYPERVISOR_set_trap_table), "1" (table)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_mmu_update(
+ mmu_update_t *req, int count, int *success_count)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+ : "0" (__HYPERVISOR_mmu_update), "1" (req), "2" (count),
+ "3" (success_count)
+ : "memory" );
+
+ return ret;
+}
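/*
 * Illustrative sketch of a direct mmu_update request: 'ptr' carries the
 * machine address of the PTE to modify (the low bits select the update
 * type; plain PTE writes use type 0) and 'val' the new contents. 'ptep'
 * and 'new_val' are hypothetical, and virt_to_machine() is assumed from
 * the Xen page.h.
 */
static void mmu_update_one_sketch(pte_t *ptep, unsigned long new_val)
{
    mmu_update_t req;

    req.ptr = virt_to_machine(ptep); /* machine address of the PTE slot */
    req.val = new_val;               /* new machine PTE value           */
    if ( HYPERVISOR_mmu_update(&req, 1, NULL) < 0 )
        BUG();
}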
+
+static inline int
+HYPERVISOR_set_gdt(
+ unsigned long *frame_list, int entries)
+{
+ int ret;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_set_gdt), "1" (frame_list), "2" (entries)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_stack_switch(
+ unsigned long ss, unsigned long esp)
+{
+ int ret;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_stack_switch), "1" (ss), "2" (esp)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_set_callbacks(
+ unsigned long event_selector, unsigned long event_address,
+ unsigned long failsafe_selector, unsigned long failsafe_address)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3, ign4;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
+ : "0" (__HYPERVISOR_set_callbacks), "1" (event_selector),
+ "2" (event_address), "3" (failsafe_selector), "4" (failsafe_address)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_fpu_taskswitch(
+ void)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_fpu_taskswitch) : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_yield(
+ void)
+{
+ int ret;
+ unsigned long ign;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign)
+ : "0" (__HYPERVISOR_sched_op), "1" (SCHEDOP_yield)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_block(
+ void)
+{
+ int ret;
+ unsigned long ign1;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_sched_op), "1" (SCHEDOP_block)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_shutdown(
+ void)
+{
+ int ret;
+ unsigned long ign1;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_sched_op),
+ "1" (SCHEDOP_shutdown | (SHUTDOWN_poweroff << SCHEDOP_reasonshift))
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_reboot(
+ void)
+{
+ int ret;
+ unsigned long ign1;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_sched_op),
+ "1" (SCHEDOP_shutdown | (SHUTDOWN_reboot << SCHEDOP_reasonshift))
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_suspend(
+ unsigned long srec)
+{
+ int ret;
+ unsigned long ign1, ign2;
+
+ /* NB. On suspend, control software expects a suspend record in %esi. */
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=S" (ign2)
+ : "0" (__HYPERVISOR_sched_op),
+ "b" (SCHEDOP_shutdown | (SHUTDOWN_suspend << SCHEDOP_reasonshift)),
+ "S" (srec) : "memory");
+
+ return ret;
+}
+
+static inline long
+HYPERVISOR_set_timer_op(
+ u64 timeout)
+{
+ int ret;
+ unsigned long timeout_hi = (unsigned long)(timeout>>32);
+ unsigned long timeout_lo = (unsigned long)timeout;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_set_timer_op), "b" (timeout_hi), "c" (timeout_lo)
+ : "memory");
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_dom0_op(
+ dom0_op_t *dom0_op)
+{
+ int ret;
+ unsigned long ign1;
+
+ dom0_op->interface_version = DOM0_INTERFACE_VERSION;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_dom0_op), "1" (dom0_op)
+ : "memory");
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_set_debugreg(
+ int reg, unsigned long value)
+{
+ int ret;
+ unsigned long ign1, ign2;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_set_debugreg), "1" (reg), "2" (value)
+ : "memory" );
+
+ return ret;
+}
+
+static inline unsigned long
+HYPERVISOR_get_debugreg(
+ int reg)
+{
+ unsigned long ret;
+ unsigned long ign;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign)
+ : "0" (__HYPERVISOR_get_debugreg), "1" (reg)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_update_descriptor(
+ unsigned long ma, unsigned long word1, unsigned long word2)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+ : "0" (__HYPERVISOR_update_descriptor), "1" (ma), "2" (word1),
+ "3" (word2)
+ : "memory" );
+
+ return ret;
+}
+
+static inline int
+HYPERVISOR_set_fast_trap(
+ int idx)
+{
+ int ret;
+ unsigned long ign;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign)
+ : "0" (__HYPERVISOR_set_fast_trap), "1" (idx)
+ : "memory" );
+
+ return ret;
+}
+
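+/*
+ * HYPERVISOR_dom_mem_op: grow or shrink this domain's memory
+ * reservation. extent_list names extents of 2^extent_order machine
+ * frames; DOMID_SELF is supplied as the fifth argument so the
+ * operation applies to the calling domain.
+ */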
+static inline int
+HYPERVISOR_dom_mem_op(
+ unsigned int op, unsigned long *extent_list,
+ unsigned long nr_extents, unsigned int extent_order)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3, ign4, ign5;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4),
+ "=D" (ign5)
+ : "0" (__HYPERVISOR_dom_mem_op), "1" (op), "2" (extent_list),
+ "3" (nr_extents), "4" (extent_order), "5" (DOMID_SELF)
+ : "memory" );
+
+ return ret;
+}
+
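+/*
+ * HYPERVISOR_multicall: execute nr_calls hypercalls described by
+ * call_list in a single trap, amortising the entry/exit cost. A
+ * hedged usage sketch, assuming the multicall_entry layout from the
+ * Xen public headers (an 'op' word followed by an args[] array):
+ *
+ *   multicall_entry_t mc[2];
+ *   mc[0].op = __HYPERVISOR_fpu_taskswitch;
+ *   mc[1].op = __HYPERVISOR_xen_version;
+ *   mc[1].args[0] = 0;
+ *   (void)HYPERVISOR_multicall(mc, 2);
+ */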
+static inline int
+HYPERVISOR_multicall(
+ void *call_list, int nr_calls)
+{
+ int ret;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_multicall), "1" (call_list), "2" (nr_calls)
+ : "memory" );
+
+ return ret;
+}
+
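+/*
+ * HYPERVISOR_update_va_mapping: validate and install a new PTE for
+ * the virtual page 'page_nr' (the VA shifted right by PAGE_SHIFT);
+ * 'flags' selects the TLB-flush behaviour. Failure indicates a kernel
+ * bug, hence the BUG() below.
+ */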
+static inline int
+HYPERVISOR_update_va_mapping(
+ unsigned long page_nr, pte_t new_val, unsigned long flags)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+ : "0" (__HYPERVISOR_update_va_mapping),
+ "1" (page_nr), "2" ((new_val).pte_low), "3" (flags)
+ : "memory" );
+
+ if ( unlikely(ret < 0) )
+ {
+ printk(KERN_ALERT "Failed update VA mapping: %08lx, %08lx, %08lx\n",
+ page_nr, (new_val).pte_low, flags);
+ BUG();
+ }
+
+ return ret;
+}
+
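+/*
+ * HYPERVISOR_event_channel_op: allocate, bind, close or signal event
+ * channels, Xen's virtual-interrupt primitive; 'op' points to a
+ * structure selecting the sub-command and its arguments.
+ */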
+static inline int
+HYPERVISOR_event_channel_op(
+ void *op)
+{
+ int ret;
+ unsigned long ignore;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ignore)
+ : "0" (__HYPERVISOR_event_channel_op), "1" (op)
+ : "memory" );
+
+ return ret;
+}
+
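+/*
+ * HYPERVISOR_xen_version: query the hypervisor version; the return
+ * value packs the major and minor numbers as (major << 16) | minor.
+ */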
+static inline int
+HYPERVISOR_xen_version(
+ int cmd)
+{
+ int ret;
+ unsigned long ignore;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ignore)
+ : "0" (__HYPERVISOR_xen_version), "1" (cmd)
+ : "memory" );
+
+ return ret;
+}
+
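+/*
+ * HYPERVISOR_console_io: read from or write to the Xen emergency
+ * console; chiefly useful for early boot and debugging output.
+ */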
+static inline int
+HYPERVISOR_console_io(
+ int cmd, int count, char *str)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+ : "0" (__HYPERVISOR_console_io), "1" (cmd), "2" (count), "3" (str)
+ : "memory" );
+
+ return ret;
+}
+
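+/*
+ * HYPERVISOR_physdev_op: physical-device operations (IRQ and PCI
+ * access control) for domains granted direct hardware access.
+ */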
+static inline int
+HYPERVISOR_physdev_op(
+ void *physdev_op)
+{
+ int ret;
+ unsigned long ign;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign)
+ : "0" (__HYPERVISOR_physdev_op), "1" (physdev_op)
+ : "memory" );
+
+ return ret;
+}
+
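+/*
+ * HYPERVISOR_grant_table_op: manipulate grant-table entries, by which
+ * one domain grants another access to, or transfer of, its pages;
+ * 'uop' points to 'count' sub-operation structures selected by 'cmd'.
+ */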
+static inline int
+HYPERVISOR_grant_table_op(
+ unsigned int cmd, void *uop, unsigned int count)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+ : "0" (__HYPERVISOR_grant_table_op), "1" (cmd), "2" (count), "3" (uop)
+ : "memory" );
+
+ return ret;
+}
+
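+/*
+ * HYPERVISOR_update_va_mapping_otherdomain: as update_va_mapping, but
+ * the privileged caller may install a mapping of a page owned by the
+ * foreign domain 'domid' (used by the control tools to inspect guest
+ * memory).
+ */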
+static inline int
+HYPERVISOR_update_va_mapping_otherdomain(
+ unsigned long page_nr, pte_t new_val, unsigned long flags, domid_t domid)
+{
+ int ret;
+ unsigned long ign1, ign2, ign3, ign4;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
+ : "0" (__HYPERVISOR_update_va_mapping_otherdomain),
+ "1" (page_nr), "2" ((new_val).pte_low), "3" (flags), "4" (domid) :
+ "memory" );
+
+ return ret;
+}
+
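+/*
+ * HYPERVISOR_vm_assist: enable or disable (cmd) one of Xen's optional
+ * virtualisation assists (type), such as writable page tables or the
+ * 4GB segment fixup.
+ */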
+static inline int
+HYPERVISOR_vm_assist(
+ unsigned int cmd, unsigned int type)
+{
+ int ret;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_vm_assist), "1" (cmd), "2" (type)
+ : "memory" );
+
+ return ret;
+}
+
+#endif /* __HYPERVISOR_H__ */